]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.2.2-201201252117.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.2.2-201201252117.patch
CommitLineData
0a5b4650
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index 81c287f..d456d02 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index 2f684da..bf21f8d 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234+endif
235+ifdef CONFIG_CHECKER_PLUGIN
236+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238+endif
239+endif
240+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242+ifeq ($(KBUILD_EXTMOD),)
243+gcc-plugins:
244+ $(Q)$(MAKE) $(build)=tools/gcc
245+else
246+gcc-plugins: ;
247+endif
248+else
249+gcc-plugins:
250+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252+else
253+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254+endif
255+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256+endif
257+endif
258+
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262@@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283-$(vmlinux-dirs): prepare scripts
284+$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288@@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296@@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304@@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308-modules_prepare: prepare scripts
309+modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313@@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317+ -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321@@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329@@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333-%.s: %.c prepare scripts FORCE
334+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335+%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339-%.o: %.c prepare scripts FORCE
340+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341+%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345-%.s: %.S prepare scripts FORCE
346+%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348-%.o: %.S prepare scripts FORCE
349+%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353@@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357-%/: prepare scripts FORCE
358+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359+%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363-%.ko: prepare scripts FORCE
364+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365+%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370index da5449e..7418343 100644
371--- a/arch/alpha/include/asm/elf.h
372+++ b/arch/alpha/include/asm/elf.h
373@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377+#ifdef CONFIG_PAX_ASLR
378+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379+
380+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382+#endif
383+
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388index de98a73..bd4f1f8 100644
389--- a/arch/alpha/include/asm/pgtable.h
390+++ b/arch/alpha/include/asm/pgtable.h
391@@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395+
396+#ifdef CONFIG_PAX_PAGEEXEC
397+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400+#else
401+# define PAGE_SHARED_NOEXEC PAGE_SHARED
402+# define PAGE_COPY_NOEXEC PAGE_COPY
403+# define PAGE_READONLY_NOEXEC PAGE_READONLY
404+#endif
405+
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410index 2fd00b7..cfd5069 100644
411--- a/arch/alpha/kernel/module.c
412+++ b/arch/alpha/kernel/module.c
413@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417- gp = (u64)me->module_core + me->core_size - 0x8000;
418+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423index 01e8715..be0e80f 100644
424--- a/arch/alpha/kernel/osf_sys.c
425+++ b/arch/alpha/kernel/osf_sys.c
426@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430- if (!vma || addr + len <= vma->vm_start)
431+ if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439+#ifdef CONFIG_PAX_RANDMMAP
440+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441+#endif
442+
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451- len, limit);
452+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453+
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458index fadd5f8..904e73a 100644
459--- a/arch/alpha/mm/fault.c
460+++ b/arch/alpha/mm/fault.c
461@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465+#ifdef CONFIG_PAX_PAGEEXEC
466+/*
467+ * PaX: decide what to do with offenders (regs->pc = fault address)
468+ *
469+ * returns 1 when task should be killed
470+ * 2 when patched PLT trampoline was detected
471+ * 3 when unpatched PLT trampoline was detected
472+ */
473+static int pax_handle_fetch_fault(struct pt_regs *regs)
474+{
475+
476+#ifdef CONFIG_PAX_EMUPLT
477+ int err;
478+
479+ do { /* PaX: patched PLT emulation #1 */
480+ unsigned int ldah, ldq, jmp;
481+
482+ err = get_user(ldah, (unsigned int *)regs->pc);
483+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485+
486+ if (err)
487+ break;
488+
489+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491+ jmp == 0x6BFB0000U)
492+ {
493+ unsigned long r27, addr;
494+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496+
497+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498+ err = get_user(r27, (unsigned long *)addr);
499+ if (err)
500+ break;
501+
502+ regs->r27 = r27;
503+ regs->pc = r27;
504+ return 2;
505+ }
506+ } while (0);
507+
508+ do { /* PaX: patched PLT emulation #2 */
509+ unsigned int ldah, lda, br;
510+
511+ err = get_user(ldah, (unsigned int *)regs->pc);
512+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
513+ err |= get_user(br, (unsigned int *)(regs->pc+8));
514+
515+ if (err)
516+ break;
517+
518+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
520+ (br & 0xFFE00000U) == 0xC3E00000U)
521+ {
522+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525+
526+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528+ return 2;
529+ }
530+ } while (0);
531+
532+ do { /* PaX: unpatched PLT emulation */
533+ unsigned int br;
534+
535+ err = get_user(br, (unsigned int *)regs->pc);
536+
537+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538+ unsigned int br2, ldq, nop, jmp;
539+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540+
541+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542+ err = get_user(br2, (unsigned int *)addr);
543+ err |= get_user(ldq, (unsigned int *)(addr+4));
544+ err |= get_user(nop, (unsigned int *)(addr+8));
545+ err |= get_user(jmp, (unsigned int *)(addr+12));
546+ err |= get_user(resolver, (unsigned long *)(addr+16));
547+
548+ if (err)
549+ break;
550+
551+ if (br2 == 0xC3600000U &&
552+ ldq == 0xA77B000CU &&
553+ nop == 0x47FF041FU &&
554+ jmp == 0x6B7B0000U)
555+ {
556+ regs->r28 = regs->pc+4;
557+ regs->r27 = addr+16;
558+ regs->pc = resolver;
559+ return 3;
560+ }
561+ }
562+ } while (0);
563+#endif
564+
565+ return 1;
566+}
567+
568+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569+{
570+ unsigned long i;
571+
572+ printk(KERN_ERR "PAX: bytes at PC: ");
573+ for (i = 0; i < 5; i++) {
574+ unsigned int c;
575+ if (get_user(c, (unsigned int *)pc+i))
576+ printk(KERN_CONT "???????? ");
577+ else
578+ printk(KERN_CONT "%08x ", c);
579+ }
580+ printk("\n");
581+}
582+#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590- if (!(vma->vm_flags & VM_EXEC))
591+ if (!(vma->vm_flags & VM_EXEC)) {
592+
593+#ifdef CONFIG_PAX_PAGEEXEC
594+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595+ goto bad_area;
596+
597+ up_read(&mm->mmap_sem);
598+ switch (pax_handle_fetch_fault(regs)) {
599+
600+#ifdef CONFIG_PAX_EMUPLT
601+ case 2:
602+ case 3:
603+ return;
604+#endif
605+
606+ }
607+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608+ do_group_exit(SIGKILL);
609+#else
610 goto bad_area;
611+#endif
612+
613+ }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618index 86976d0..8a57797 100644
619--- a/arch/arm/include/asm/atomic.h
620+++ b/arch/arm/include/asm/atomic.h
621@@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625+#ifdef CONFIG_PAX_REFCOUNT
626+typedef struct {
627+ u64 __aligned(8) counter;
628+} atomic64_unchecked_t;
629+#else
630+typedef atomic64_t atomic64_unchecked_t;
631+#endif
632+
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637index 0e9ce8d..6ef1e03 100644
638--- a/arch/arm/include/asm/elf.h
639+++ b/arch/arm/include/asm/elf.h
640@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646+
647+#ifdef CONFIG_PAX_ASLR
648+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649+
650+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652+#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660-struct mm_struct;
661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662-#define arch_randomize_brk arch_randomize_brk
663-
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668index e51b1e8..32a3113 100644
669--- a/arch/arm/include/asm/kmap_types.h
670+++ b/arch/arm/include/asm/kmap_types.h
671@@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675+ KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680index b293616..96310e5 100644
681--- a/arch/arm/include/asm/uaccess.h
682+++ b/arch/arm/include/asm/uaccess.h
683@@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687+extern void check_object_size(const void *ptr, unsigned long n, bool to);
688+
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692@@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700+
701+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702+{
703+ if (!__builtin_constant_p(n))
704+ check_object_size(to, n, false);
705+ return ___copy_from_user(to, from, n);
706+}
707+
708+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709+{
710+ if (!__builtin_constant_p(n))
711+ check_object_size(from, n, true);
712+ return ___copy_to_user(to, from, n);
713+}
714+
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722+ if ((long)n < 0)
723+ return n;
724+
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732+ if ((long)n < 0)
733+ return n;
734+
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739index 5b0bce6..becd81c 100644
740--- a/arch/arm/kernel/armksyms.c
741+++ b/arch/arm/kernel/armksyms.c
742@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746-EXPORT_SYMBOL(__copy_from_user);
747-EXPORT_SYMBOL(__copy_to_user);
748+EXPORT_SYMBOL(___copy_from_user);
749+EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754index 3d0c6fb..3dcae52 100644
755--- a/arch/arm/kernel/process.c
756+++ b/arch/arm/kernel/process.c
757@@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761-#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769-unsigned long arch_randomize_brk(struct mm_struct *mm)
770-{
771- unsigned long range_end = mm->brk + 0x02000000;
772- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773-}
774-
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779index 99a5727..a3d5bb1 100644
780--- a/arch/arm/kernel/traps.c
781+++ b/arch/arm/kernel/traps.c
782@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786+extern void gr_handle_kernel_exploit(void);
787+
788 /*
789 * This function is protected against re-entrancy.
790 */
791@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795+
796+ gr_handle_kernel_exploit();
797+
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802index 66a477a..bee61d3 100644
803--- a/arch/arm/lib/copy_from_user.S
804+++ b/arch/arm/lib/copy_from_user.S
805@@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809- * size_t __copy_from_user(void *to, const void *from, size_t n)
810+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814@@ -84,11 +84,11 @@
815
816 .text
817
818-ENTRY(__copy_from_user)
819+ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823-ENDPROC(__copy_from_user)
824+ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829index d066df6..df28194 100644
830--- a/arch/arm/lib/copy_to_user.S
831+++ b/arch/arm/lib/copy_to_user.S
832@@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836- * size_t __copy_to_user(void *to, const void *from, size_t n)
837+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841@@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845-WEAK(__copy_to_user)
846+WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850-ENDPROC(__copy_to_user)
851+ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856index d0ece2a..5ae2f39 100644
857--- a/arch/arm/lib/uaccess.S
858+++ b/arch/arm/lib/uaccess.S
859@@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872-ENTRY(__copy_to_user)
873+ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881-ENDPROC(__copy_to_user)
882+ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898-ENTRY(__copy_from_user)
899+ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907-ENDPROC(__copy_from_user)
908+ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913index 025f742..8432b08 100644
914--- a/arch/arm/lib/uaccess_with_memcpy.c
915+++ b/arch/arm/lib/uaccess_with_memcpy.c
916@@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920-__copy_to_user(void __user *to, const void *from, unsigned long n)
921+___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926index 2b2d51c..0127490 100644
927--- a/arch/arm/mach-ux500/mbox-db5500.c
928+++ b/arch/arm/mach-ux500/mbox-db5500.c
929@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939index aa33949..b242a2f 100644
940--- a/arch/arm/mm/fault.c
941+++ b/arch/arm/mm/fault.c
942@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946+#ifdef CONFIG_PAX_PAGEEXEC
947+ if (fsr & FSR_LNX_PF) {
948+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949+ do_group_exit(SIGKILL);
950+ }
951+#endif
952+
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960+#ifdef CONFIG_PAX_PAGEEXEC
961+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962+{
963+ long i;
964+
965+ printk(KERN_ERR "PAX: bytes at PC: ");
966+ for (i = 0; i < 20; i++) {
967+ unsigned char c;
968+ if (get_user(c, (__force unsigned char __user *)pc+i))
969+ printk(KERN_CONT "?? ");
970+ else
971+ printk(KERN_CONT "%02x ", c);
972+ }
973+ printk("\n");
974+
975+ printk(KERN_ERR "PAX: bytes at SP-4: ");
976+ for (i = -1; i < 20; i++) {
977+ unsigned long c;
978+ if (get_user(c, (__force unsigned long __user *)sp+i))
979+ printk(KERN_CONT "???????? ");
980+ else
981+ printk(KERN_CONT "%08lx ", c);
982+ }
983+ printk("\n");
984+}
985+#endif
986+
987 /*
988 * First Level Translation Fault Handler
989 *
990diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991index 44b628e..623ee2a 100644
992--- a/arch/arm/mm/mmap.c
993+++ b/arch/arm/mm/mmap.c
994@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998+#ifdef CONFIG_PAX_RANDMMAP
999+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000+#endif
1001+
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009- if (TASK_SIZE - len >= addr &&
1010- (!vma || addr + len <= vma->vm_start))
1011+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015- start_addr = addr = mm->free_area_cache;
1016+ start_addr = addr = mm->free_area_cache;
1017 } else {
1018- start_addr = addr = TASK_UNMAPPED_BASE;
1019- mm->cached_hole_size = 0;
1020+ start_addr = addr = mm->mmap_base;
1021+ mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025@@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029- if (start_addr != TASK_UNMAPPED_BASE) {
1030- start_addr = addr = TASK_UNMAPPED_BASE;
1031+ if (start_addr != mm->mmap_base) {
1032+ start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038- if (!vma || addr + len <= vma->vm_start) {
1039+ if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044index 3b3159b..425ea94 100644
1045--- a/arch/avr32/include/asm/elf.h
1046+++ b/arch/avr32/include/asm/elf.h
1047@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054+#ifdef CONFIG_PAX_ASLR
1055+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056+
1057+#define PAX_DELTA_MMAP_LEN 15
1058+#define PAX_DELTA_STACK_LEN 15
1059+#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064index b7f5c68..556135c 100644
1065--- a/arch/avr32/include/asm/kmap_types.h
1066+++ b/arch/avr32/include/asm/kmap_types.h
1067@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071-D(14) KM_TYPE_NR
1072+D(14) KM_CLEARPAGE,
1073+D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078index f7040a1..db9f300 100644
1079--- a/arch/avr32/mm/fault.c
1080+++ b/arch/avr32/mm/fault.c
1081@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085+#ifdef CONFIG_PAX_PAGEEXEC
1086+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087+{
1088+ unsigned long i;
1089+
1090+ printk(KERN_ERR "PAX: bytes at PC: ");
1091+ for (i = 0; i < 20; i++) {
1092+ unsigned char c;
1093+ if (get_user(c, (unsigned char *)pc+i))
1094+ printk(KERN_CONT "?? ");
1095+ else
1096+ printk(KERN_CONT "%02x ", c);
1097+ }
1098+ printk("\n");
1099+}
1100+#endif
1101+
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105@@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109+
1110+#ifdef CONFIG_PAX_PAGEEXEC
1111+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114+ do_group_exit(SIGKILL);
1115+ }
1116+ }
1117+#endif
1118+
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123index f8e16b2..c73ff79 100644
1124--- a/arch/frv/include/asm/kmap_types.h
1125+++ b/arch/frv/include/asm/kmap_types.h
1126@@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130+ KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135index 385fd30..6c3d97e 100644
1136--- a/arch/frv/mm/elf-fdpic.c
1137+++ b/arch/frv/mm/elf-fdpic.c
1138@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142- if (TASK_SIZE - len >= addr &&
1143- (!vma || addr + len <= vma->vm_start))
1144+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152- if (addr + len <= vma->vm_start)
1153+ if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161- if (addr + len <= vma->vm_start)
1162+ if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167index b5298eb..67c6e62 100644
1168--- a/arch/ia64/include/asm/elf.h
1169+++ b/arch/ia64/include/asm/elf.h
1170@@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174+#ifdef CONFIG_PAX_ASLR
1175+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176+
1177+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179+#endif
1180+
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185index 1a97af3..7529d31 100644
1186--- a/arch/ia64/include/asm/pgtable.h
1187+++ b/arch/ia64/include/asm/pgtable.h
1188@@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192-
1193+#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197@@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201+
1202+#ifdef CONFIG_PAX_PAGEEXEC
1203+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206+#else
1207+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209+# define PAGE_COPY_NOEXEC PAGE_COPY
1210+#endif
1211+
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216index b77768d..e0795eb 100644
1217--- a/arch/ia64/include/asm/spinlock.h
1218+++ b/arch/ia64/include/asm/spinlock.h
1219@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229index 449c8c0..432a3d2 100644
1230--- a/arch/ia64/include/asm/uaccess.h
1231+++ b/arch/ia64/include/asm/uaccess.h
1232@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251index 24603be..948052d 100644
1252--- a/arch/ia64/kernel/module.c
1253+++ b/arch/ia64/kernel/module.c
1254@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258- if (mod && mod->arch.init_unw_table &&
1259- module_region == mod->module_init) {
1260+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268+in_init_rx (const struct module *mod, uint64_t addr)
1269+{
1270+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271+}
1272+
1273+static inline int
1274+in_init_rw (const struct module *mod, uint64_t addr)
1275+{
1276+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277+}
1278+
1279+static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282- return addr - (uint64_t) mod->module_init < mod->init_size;
1283+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284+}
1285+
1286+static inline int
1287+in_core_rx (const struct module *mod, uint64_t addr)
1288+{
1289+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290+}
1291+
1292+static inline int
1293+in_core_rw (const struct module *mod, uint64_t addr)
1294+{
1295+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301- return addr - (uint64_t) mod->module_core < mod->core_size;
1302+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311+ if (in_init_rx(mod, val))
1312+ val -= (uint64_t) mod->module_init_rx;
1313+ else if (in_init_rw(mod, val))
1314+ val -= (uint64_t) mod->module_init_rw;
1315+ else if (in_core_rx(mod, val))
1316+ val -= (uint64_t) mod->module_core_rx;
1317+ else if (in_core_rw(mod, val))
1318+ val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326- if (mod->core_size > MAX_LTOFF)
1327+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332- gp = mod->core_size - MAX_LTOFF / 2;
1333+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335- gp = mod->core_size / 2;
1336- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343index 609d500..7dde2a8 100644
1344--- a/arch/ia64/kernel/sys_ia64.c
1345+++ b/arch/ia64/kernel/sys_ia64.c
1346@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350+
1351+#ifdef CONFIG_PAX_RANDMMAP
1352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1353+ addr = mm->free_area_cache;
1354+ else
1355+#endif
1356+
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364- if (start_addr != TASK_UNMAPPED_BASE) {
1365+ if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367- addr = TASK_UNMAPPED_BASE;
1368+ addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373- if (!vma || addr + len <= vma->vm_start) {
1374+ if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379index 53c0ba0..2accdde 100644
1380--- a/arch/ia64/kernel/vmlinux.lds.S
1381+++ b/arch/ia64/kernel/vmlinux.lds.S
1382@@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386- __phys_per_cpu_start = __per_cpu_load;
1387+ __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392index 20b3593..1ce77f0 100644
1393--- a/arch/ia64/mm/fault.c
1394+++ b/arch/ia64/mm/fault.c
1395@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399+#ifdef CONFIG_PAX_PAGEEXEC
1400+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401+{
1402+ unsigned long i;
1403+
1404+ printk(KERN_ERR "PAX: bytes at PC: ");
1405+ for (i = 0; i < 8; i++) {
1406+ unsigned int c;
1407+ if (get_user(c, (unsigned int *)pc+i))
1408+ printk(KERN_CONT "???????? ");
1409+ else
1410+ printk(KERN_CONT "%08x ", c);
1411+ }
1412+ printk("\n");
1413+}
1414+#endif
1415+
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423- if ((vma->vm_flags & mask) != mask)
1424+ if ((vma->vm_flags & mask) != mask) {
1425+
1426+#ifdef CONFIG_PAX_PAGEEXEC
1427+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429+ goto bad_area;
1430+
1431+ up_read(&mm->mmap_sem);
1432+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433+ do_group_exit(SIGKILL);
1434+ }
1435+#endif
1436+
1437 goto bad_area;
1438
1439+ }
1440+
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445index 5ca674b..e0e1b70 100644
1446--- a/arch/ia64/mm/hugetlbpage.c
1447+++ b/arch/ia64/mm/hugetlbpage.c
1448@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452- if (!vmm || (addr + len) <= vmm->vm_start)
1453+ if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458index 00cb0e2..2ad8024 100644
1459--- a/arch/ia64/mm/init.c
1460+++ b/arch/ia64/mm/init.c
1461@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465+
1466+#ifdef CONFIG_PAX_PAGEEXEC
1467+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468+ vma->vm_flags &= ~VM_EXEC;
1469+
1470+#ifdef CONFIG_PAX_MPROTECT
1471+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472+ vma->vm_flags &= ~VM_MAYEXEC;
1473+#endif
1474+
1475+ }
1476+#endif
1477+
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482index 82abd15..d95ae5d 100644
1483--- a/arch/m32r/lib/usercopy.c
1484+++ b/arch/m32r/lib/usercopy.c
1485@@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489+ if ((long)n < 0)
1490+ return n;
1491+
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499+ if ((long)n < 0)
1500+ return n;
1501+
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506index 455c0ac..ad65fbe 100644
1507--- a/arch/mips/include/asm/elf.h
1508+++ b/arch/mips/include/asm/elf.h
1509@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513+#ifdef CONFIG_PAX_ASLR
1514+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515+
1516+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518+#endif
1519+
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525-struct mm_struct;
1526-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527-#define arch_randomize_brk arch_randomize_brk
1528-
1529 #endif /* _ASM_ELF_H */
1530diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531index e59cd1a..8e329d6 100644
1532--- a/arch/mips/include/asm/page.h
1533+++ b/arch/mips/include/asm/page.h
1534@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544index 6018c80..7c37203 100644
1545--- a/arch/mips/include/asm/system.h
1546+++ b/arch/mips/include/asm/system.h
1547@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551-extern unsigned long arch_align_stack(unsigned long sp);
1552+#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556index 9fdd8bc..4bd7f1a 100644
1557--- a/arch/mips/kernel/binfmt_elfn32.c
1558+++ b/arch/mips/kernel/binfmt_elfn32.c
1559@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563+#ifdef CONFIG_PAX_ASLR
1564+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565+
1566+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568+#endif
1569+
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574index ff44823..97f8906 100644
1575--- a/arch/mips/kernel/binfmt_elfo32.c
1576+++ b/arch/mips/kernel/binfmt_elfo32.c
1577@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581+#ifdef CONFIG_PAX_ASLR
1582+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583+
1584+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586+#endif
1587+
1588 #include <asm/processor.h>
1589
1590 /*
1591diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592index c47f96e..661d418 100644
1593--- a/arch/mips/kernel/process.c
1594+++ b/arch/mips/kernel/process.c
1595@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599-
1600-/*
1601- * Don't forget that the stack pointer must be aligned on a 8 bytes
1602- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603- */
1604-unsigned long arch_align_stack(unsigned long sp)
1605-{
1606- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607- sp -= get_random_int() & ~PAGE_MASK;
1608-
1609- return sp & ALMASK;
1610-}
1611diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612index 937cf33..adb39bb 100644
1613--- a/arch/mips/mm/fault.c
1614+++ b/arch/mips/mm/fault.c
1615@@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619+#ifdef CONFIG_PAX_PAGEEXEC
1620+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621+{
1622+ unsigned long i;
1623+
1624+ printk(KERN_ERR "PAX: bytes at PC: ");
1625+ for (i = 0; i < 5; i++) {
1626+ unsigned int c;
1627+ if (get_user(c, (unsigned int *)pc+i))
1628+ printk(KERN_CONT "???????? ");
1629+ else
1630+ printk(KERN_CONT "%08x ", c);
1631+ }
1632+ printk("\n");
1633+}
1634+#endif
1635+
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640index 302d779..7d35bf8 100644
1641--- a/arch/mips/mm/mmap.c
1642+++ b/arch/mips/mm/mmap.c
1643@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647+
1648+#ifdef CONFIG_PAX_RANDMMAP
1649+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650+#endif
1651+
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659- if (TASK_SIZE - len >= addr &&
1660- (!vma || addr + len <= vma->vm_start))
1661+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1662 return addr;
1663 }
1664
1665@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669- if (!vma || addr + len <= vma->vm_start)
1670+ if (check_heap_stack_gap(vma, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678- if (!vma || addr <= vma->vm_start) {
1679+ if (check_heap_stack_gap(vma, addr - len, len)) {
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687- if (likely(!vma || addr + len <= vma->vm_start)) {
1688+ if (check_heap_stack_gap(vma, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696-
1697-static inline unsigned long brk_rnd(void)
1698-{
1699- unsigned long rnd = get_random_int();
1700-
1701- rnd = rnd << PAGE_SHIFT;
1702- /* 8MB for 32bit, 256MB for 64bit */
1703- if (TASK_IS_32BIT_ADDR)
1704- rnd = rnd & 0x7ffffful;
1705- else
1706- rnd = rnd & 0xffffffful;
1707-
1708- return rnd;
1709-}
1710-
1711-unsigned long arch_randomize_brk(struct mm_struct *mm)
1712-{
1713- unsigned long base = mm->brk;
1714- unsigned long ret;
1715-
1716- ret = PAGE_ALIGN(base + brk_rnd());
1717-
1718- if (ret < mm->brk)
1719- return mm->brk;
1720-
1721- return ret;
1722-}
1723diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724index 19f6cb1..6c78cf2 100644
1725--- a/arch/parisc/include/asm/elf.h
1726+++ b/arch/parisc/include/asm/elf.h
1727@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731+#ifdef CONFIG_PAX_ASLR
1732+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733+
1734+#define PAX_DELTA_MMAP_LEN 16
1735+#define PAX_DELTA_STACK_LEN 16
1736+#endif
1737+
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742index 22dadeb..f6c2be4 100644
1743--- a/arch/parisc/include/asm/pgtable.h
1744+++ b/arch/parisc/include/asm/pgtable.h
1745@@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749+
1750+#ifdef CONFIG_PAX_PAGEEXEC
1751+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754+#else
1755+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756+# define PAGE_COPY_NOEXEC PAGE_COPY
1757+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758+#endif
1759+
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764index 5e34ccf..672bc9c 100644
1765--- a/arch/parisc/kernel/module.c
1766+++ b/arch/parisc/kernel/module.c
1767@@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771+static inline int in_init_rx(struct module *me, void *loc)
1772+{
1773+ return (loc >= me->module_init_rx &&
1774+ loc < (me->module_init_rx + me->init_size_rx));
1775+}
1776+
1777+static inline int in_init_rw(struct module *me, void *loc)
1778+{
1779+ return (loc >= me->module_init_rw &&
1780+ loc < (me->module_init_rw + me->init_size_rw));
1781+}
1782+
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785- return (loc >= me->module_init &&
1786- loc <= (me->module_init + me->init_size));
1787+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1788+}
1789+
1790+static inline int in_core_rx(struct module *me, void *loc)
1791+{
1792+ return (loc >= me->module_core_rx &&
1793+ loc < (me->module_core_rx + me->core_size_rx));
1794+}
1795+
1796+static inline int in_core_rw(struct module *me, void *loc)
1797+{
1798+ return (loc >= me->module_core_rw &&
1799+ loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804- return (loc >= me->module_core &&
1805- loc <= (me->module_core + me->core_size));
1806+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814- me->core_size = ALIGN(me->core_size, 16);
1815- me->arch.got_offset = me->core_size;
1816- me->core_size += gots * sizeof(struct got_entry);
1817+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818+ me->arch.got_offset = me->core_size_rw;
1819+ me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821- me->core_size = ALIGN(me->core_size, 16);
1822- me->arch.fdesc_offset = me->core_size;
1823- me->core_size += fdescs * sizeof(Elf_Fdesc);
1824+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825+ me->arch.fdesc_offset = me->core_size_rw;
1826+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834- got = me->module_core + me->arch.got_offset;
1835+ got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867index c9b9322..02d8940 100644
1868--- a/arch/parisc/kernel/sys_parisc.c
1869+++ b/arch/parisc/kernel/sys_parisc.c
1870@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874- if (!vma || addr + len <= vma->vm_start)
1875+ if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883- if (!vma || addr + len <= vma->vm_start)
1884+ if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892- addr = TASK_UNMAPPED_BASE;
1893+ addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898index f19e660..414fe24 100644
1899--- a/arch/parisc/kernel/traps.c
1900+++ b/arch/parisc/kernel/traps.c
1901@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905- if (vma && (regs->iaoq[0] >= vma->vm_start)
1906- && (vma->vm_flags & VM_EXEC)) {
1907-
1908+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913index 18162ce..94de376 100644
1914--- a/arch/parisc/mm/fault.c
1915+++ b/arch/parisc/mm/fault.c
1916@@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920+#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928- if (code == 6 || code == 16)
1929+ if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937+#ifdef CONFIG_PAX_PAGEEXEC
1938+/*
1939+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940+ *
1941+ * returns 1 when task should be killed
1942+ * 2 when rt_sigreturn trampoline was detected
1943+ * 3 when unpatched PLT trampoline was detected
1944+ */
1945+static int pax_handle_fetch_fault(struct pt_regs *regs)
1946+{
1947+
1948+#ifdef CONFIG_PAX_EMUPLT
1949+ int err;
1950+
1951+ do { /* PaX: unpatched PLT emulation */
1952+ unsigned int bl, depwi;
1953+
1954+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962+
1963+ err = get_user(ldw, (unsigned int *)addr);
1964+ err |= get_user(bv, (unsigned int *)(addr+4));
1965+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1966+
1967+ if (err)
1968+ break;
1969+
1970+ if (ldw == 0x0E801096U &&
1971+ bv == 0xEAC0C000U &&
1972+ ldw2 == 0x0E881095U)
1973+ {
1974+ unsigned int resolver, map;
1975+
1976+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978+ if (err)
1979+ break;
1980+
1981+ regs->gr[20] = instruction_pointer(regs)+8;
1982+ regs->gr[21] = map;
1983+ regs->gr[22] = resolver;
1984+ regs->iaoq[0] = resolver | 3UL;
1985+ regs->iaoq[1] = regs->iaoq[0] + 4;
1986+ return 3;
1987+ }
1988+ }
1989+ } while (0);
1990+#endif
1991+
1992+#ifdef CONFIG_PAX_EMUTRAMP
1993+
1994+#ifndef CONFIG_PAX_EMUSIGRT
1995+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996+ return 1;
1997+#endif
1998+
1999+ do { /* PaX: rt_sigreturn emulation */
2000+ unsigned int ldi1, ldi2, bel, nop;
2001+
2002+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006+
2007+ if (err)
2008+ break;
2009+
2010+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011+ ldi2 == 0x3414015AU &&
2012+ bel == 0xE4008200U &&
2013+ nop == 0x08000240U)
2014+ {
2015+ regs->gr[25] = (ldi1 & 2) >> 1;
2016+ regs->gr[20] = __NR_rt_sigreturn;
2017+ regs->gr[31] = regs->iaoq[1] + 16;
2018+ regs->sr[0] = regs->iasq[1];
2019+ regs->iaoq[0] = 0x100UL;
2020+ regs->iaoq[1] = regs->iaoq[0] + 4;
2021+ regs->iasq[0] = regs->sr[2];
2022+ regs->iasq[1] = regs->sr[2];
2023+ return 2;
2024+ }
2025+ } while (0);
2026+#endif
2027+
2028+ return 1;
2029+}
2030+
2031+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032+{
2033+ unsigned long i;
2034+
2035+ printk(KERN_ERR "PAX: bytes at PC: ");
2036+ for (i = 0; i < 5; i++) {
2037+ unsigned int c;
2038+ if (get_user(c, (unsigned int *)pc+i))
2039+ printk(KERN_CONT "???????? ");
2040+ else
2041+ printk(KERN_CONT "%08x ", c);
2042+ }
2043+ printk("\n");
2044+}
2045+#endif
2046+
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050@@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054- if ((vma->vm_flags & acc_type) != acc_type)
2055+ if ((vma->vm_flags & acc_type) != acc_type) {
2056+
2057+#ifdef CONFIG_PAX_PAGEEXEC
2058+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059+ (address & ~3UL) == instruction_pointer(regs))
2060+ {
2061+ up_read(&mm->mmap_sem);
2062+ switch (pax_handle_fetch_fault(regs)) {
2063+
2064+#ifdef CONFIG_PAX_EMUPLT
2065+ case 3:
2066+ return;
2067+#endif
2068+
2069+#ifdef CONFIG_PAX_EMUTRAMP
2070+ case 2:
2071+ return;
2072+#endif
2073+
2074+ }
2075+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076+ do_group_exit(SIGKILL);
2077+ }
2078+#endif
2079+
2080 goto bad_area;
2081+ }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086index 3bf9cca..e7457d0 100644
2087--- a/arch/powerpc/include/asm/elf.h
2088+++ b/arch/powerpc/include/asm/elf.h
2089@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093-extern unsigned long randomize_et_dyn(unsigned long base);
2094-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095+#define ELF_ET_DYN_BASE (0x20000000)
2096+
2097+#ifdef CONFIG_PAX_ASLR
2098+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099+
2100+#ifdef __powerpc64__
2101+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103+#else
2104+#define PAX_DELTA_MMAP_LEN 15
2105+#define PAX_DELTA_STACK_LEN 15
2106+#endif
2107+#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116-#define arch_randomize_brk arch_randomize_brk
2117-
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122index bca8fdc..61e9580 100644
2123--- a/arch/powerpc/include/asm/kmap_types.h
2124+++ b/arch/powerpc/include/asm/kmap_types.h
2125@@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129+ KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134index d4a7f64..451de1c 100644
2135--- a/arch/powerpc/include/asm/mman.h
2136+++ b/arch/powerpc/include/asm/mman.h
2137@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147index dd9c4fd..a2ced87 100644
2148--- a/arch/powerpc/include/asm/page.h
2149+++ b/arch/powerpc/include/asm/page.h
2150@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156+#define VM_DATA_DEFAULT_FLAGS32 \
2157+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166+#define ktla_ktva(addr) (addr)
2167+#define ktva_ktla(addr) (addr)
2168+
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173index fb40ede..d3ce956 100644
2174--- a/arch/powerpc/include/asm/page_64.h
2175+++ b/arch/powerpc/include/asm/page_64.h
2176@@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182+#define VM_STACK_DEFAULT_FLAGS32 \
2183+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189+#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193+#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198index 88b0bd9..e32bc67 100644
2199--- a/arch/powerpc/include/asm/pgtable.h
2200+++ b/arch/powerpc/include/asm/pgtable.h
2201@@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205+#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210index 4aad413..85d86bf 100644
2211--- a/arch/powerpc/include/asm/pte-hash32.h
2212+++ b/arch/powerpc/include/asm/pte-hash32.h
2213@@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217+#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222index 559da19..7e5835c 100644
2223--- a/arch/powerpc/include/asm/reg.h
2224+++ b/arch/powerpc/include/asm/reg.h
2225@@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234index e30a13d..2b7d994 100644
2235--- a/arch/powerpc/include/asm/system.h
2236+++ b/arch/powerpc/include/asm/system.h
2237@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241-extern unsigned long arch_align_stack(unsigned long sp);
2242+#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247index bd0fb84..a42a14b 100644
2248--- a/arch/powerpc/include/asm/uaccess.h
2249+++ b/arch/powerpc/include/asm/uaccess.h
2250@@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255+
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259@@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263-#ifndef __powerpc64__
2264-
2265-static inline unsigned long copy_from_user(void *to,
2266- const void __user *from, unsigned long n)
2267-{
2268- unsigned long over;
2269-
2270- if (access_ok(VERIFY_READ, from, n))
2271- return __copy_tofrom_user((__force void __user *)to, from, n);
2272- if ((unsigned long)from < TASK_SIZE) {
2273- over = (unsigned long)from + n - TASK_SIZE;
2274- return __copy_tofrom_user((__force void __user *)to, from,
2275- n - over) + over;
2276- }
2277- return n;
2278-}
2279-
2280-static inline unsigned long copy_to_user(void __user *to,
2281- const void *from, unsigned long n)
2282-{
2283- unsigned long over;
2284-
2285- if (access_ok(VERIFY_WRITE, to, n))
2286- return __copy_tofrom_user(to, (__force void __user *)from, n);
2287- if ((unsigned long)to < TASK_SIZE) {
2288- over = (unsigned long)to + n - TASK_SIZE;
2289- return __copy_tofrom_user(to, (__force void __user *)from,
2290- n - over) + over;
2291- }
2292- return n;
2293-}
2294-
2295-#else /* __powerpc64__ */
2296-
2297-#define __copy_in_user(to, from, size) \
2298- __copy_tofrom_user((to), (from), (size))
2299-
2300-extern unsigned long copy_from_user(void *to, const void __user *from,
2301- unsigned long n);
2302-extern unsigned long copy_to_user(void __user *to, const void *from,
2303- unsigned long n);
2304-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305- unsigned long n);
2306-
2307-#endif /* __powerpc64__ */
2308-
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316+
2317+ if (!__builtin_constant_p(n))
2318+ check_object_size(to, n, false);
2319+
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327+
2328+ if (!__builtin_constant_p(n))
2329+ check_object_size(from, n, true);
2330+
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338+#ifndef __powerpc64__
2339+
2340+static inline unsigned long __must_check copy_from_user(void *to,
2341+ const void __user *from, unsigned long n)
2342+{
2343+ unsigned long over;
2344+
2345+ if ((long)n < 0)
2346+ return n;
2347+
2348+ if (access_ok(VERIFY_READ, from, n)) {
2349+ if (!__builtin_constant_p(n))
2350+ check_object_size(to, n, false);
2351+ return __copy_tofrom_user((__force void __user *)to, from, n);
2352+ }
2353+ if ((unsigned long)from < TASK_SIZE) {
2354+ over = (unsigned long)from + n - TASK_SIZE;
2355+ if (!__builtin_constant_p(n - over))
2356+ check_object_size(to, n - over, false);
2357+ return __copy_tofrom_user((__force void __user *)to, from,
2358+ n - over) + over;
2359+ }
2360+ return n;
2361+}
2362+
2363+static inline unsigned long __must_check copy_to_user(void __user *to,
2364+ const void *from, unsigned long n)
2365+{
2366+ unsigned long over;
2367+
2368+ if ((long)n < 0)
2369+ return n;
2370+
2371+ if (access_ok(VERIFY_WRITE, to, n)) {
2372+ if (!__builtin_constant_p(n))
2373+ check_object_size(from, n, true);
2374+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2375+ }
2376+ if ((unsigned long)to < TASK_SIZE) {
2377+ over = (unsigned long)to + n - TASK_SIZE;
2378+ if (!__builtin_constant_p(n))
2379+ check_object_size(from, n - over, true);
2380+ return __copy_tofrom_user(to, (__force void __user *)from,
2381+ n - over) + over;
2382+ }
2383+ return n;
2384+}
2385+
2386+#else /* __powerpc64__ */
2387+
2388+#define __copy_in_user(to, from, size) \
2389+ __copy_tofrom_user((to), (from), (size))
2390+
2391+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392+{
2393+ if ((long)n < 0 || n > INT_MAX)
2394+ return n;
2395+
2396+ if (!__builtin_constant_p(n))
2397+ check_object_size(to, n, false);
2398+
2399+ if (likely(access_ok(VERIFY_READ, from, n)))
2400+ n = __copy_from_user(to, from, n);
2401+ else
2402+ memset(to, 0, n);
2403+ return n;
2404+}
2405+
2406+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407+{
2408+ if ((long)n < 0 || n > INT_MAX)
2409+ return n;
2410+
2411+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412+ if (!__builtin_constant_p(n))
2413+ check_object_size(from, n, true);
2414+ n = __copy_to_user(to, from, n);
2415+ }
2416+ return n;
2417+}
2418+
2419+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420+ unsigned long n);
2421+
2422+#endif /* __powerpc64__ */
2423+
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428index 429983c..7af363b 100644
2429--- a/arch/powerpc/kernel/exceptions-64e.S
2430+++ b/arch/powerpc/kernel/exceptions-64e.S
2431@@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435+ bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439@@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443-1: bl .save_nvgprs
2444- mr r5,r3
2445+1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450index cf9c69b..ebc9640 100644
2451--- a/arch/powerpc/kernel/exceptions-64s.S
2452+++ b/arch/powerpc/kernel/exceptions-64s.S
2453@@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457+ bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461- bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466index 0b6d796..d760ddb 100644
2467--- a/arch/powerpc/kernel/module_32.c
2468+++ b/arch/powerpc/kernel/module_32.c
2469@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473- printk("Module doesn't contain .plt or .init.plt sections.\n");
2474+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482- if (location >= mod->module_core
2483- && location < mod->module_core + mod->core_size)
2484+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487- else
2488+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491+ else {
2492+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493+ return ~0UL;
2494+ }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499index 6457574..08b28d3 100644
2500--- a/arch/powerpc/kernel/process.c
2501+++ b/arch/powerpc/kernel/process.c
2502@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521- printk(" (%pS)",
2522+ printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539-
2540-unsigned long arch_align_stack(unsigned long sp)
2541-{
2542- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543- sp -= get_random_int() & ~PAGE_MASK;
2544- return sp & ~0xf;
2545-}
2546-
2547-static inline unsigned long brk_rnd(void)
2548-{
2549- unsigned long rnd = 0;
2550-
2551- /* 8MB for 32bit, 1GB for 64bit */
2552- if (is_32bit_task())
2553- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554- else
2555- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556-
2557- return rnd << PAGE_SHIFT;
2558-}
2559-
2560-unsigned long arch_randomize_brk(struct mm_struct *mm)
2561-{
2562- unsigned long base = mm->brk;
2563- unsigned long ret;
2564-
2565-#ifdef CONFIG_PPC_STD_MMU_64
2566- /*
2567- * If we are using 1TB segments and we are allowed to randomise
2568- * the heap, we can put it above 1TB so it is backed by a 1TB
2569- * segment. Otherwise the heap will be in the bottom 1TB
2570- * which always uses 256MB segments and this may result in a
2571- * performance penalty.
2572- */
2573- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575-#endif
2576-
2577- ret = PAGE_ALIGN(base + brk_rnd());
2578-
2579- if (ret < mm->brk)
2580- return mm->brk;
2581-
2582- return ret;
2583-}
2584-
2585-unsigned long randomize_et_dyn(unsigned long base)
2586-{
2587- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588-
2589- if (ret < base)
2590- return base;
2591-
2592- return ret;
2593-}
2594diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595index 836a5a1..27289a3 100644
2596--- a/arch/powerpc/kernel/signal_32.c
2597+++ b/arch/powerpc/kernel/signal_32.c
2598@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608index a50b5ec..547078a 100644
2609--- a/arch/powerpc/kernel/signal_64.c
2610+++ b/arch/powerpc/kernel/signal_64.c
2611@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621index 5459d14..10f8070 100644
2622--- a/arch/powerpc/kernel/traps.c
2623+++ b/arch/powerpc/kernel/traps.c
2624@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628+extern void gr_handle_kernel_exploit(void);
2629+
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637+ gr_handle_kernel_exploit();
2638+
2639 oops_exit();
2640 do_exit(err);
2641
2642diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643index 7d14bb6..1305601 100644
2644--- a/arch/powerpc/kernel/vdso.c
2645+++ b/arch/powerpc/kernel/vdso.c
2646@@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650+#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658- current->mm->context.vdso_base = 0;
2659+ current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667- 0, 0);
2668+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673index 5eea6f3..5d10396 100644
2674--- a/arch/powerpc/lib/usercopy_64.c
2675+++ b/arch/powerpc/lib/usercopy_64.c
2676@@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681-{
2682- if (likely(access_ok(VERIFY_READ, from, n)))
2683- n = __copy_from_user(to, from, n);
2684- else
2685- memset(to, 0, n);
2686- return n;
2687-}
2688-
2689-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690-{
2691- if (likely(access_ok(VERIFY_WRITE, to, n)))
2692- n = __copy_to_user(to, from, n);
2693- return n;
2694-}
2695-
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703-EXPORT_SYMBOL(copy_from_user);
2704-EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708index 5efe8c9..db9ceef 100644
2709--- a/arch/powerpc/mm/fault.c
2710+++ b/arch/powerpc/mm/fault.c
2711@@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715+#include <linux/slab.h>
2716+#include <linux/pagemap.h>
2717+#include <linux/compiler.h>
2718+#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722@@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726+#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734+#ifdef CONFIG_PAX_PAGEEXEC
2735+/*
2736+ * PaX: decide what to do with offenders (regs->nip = fault address)
2737+ *
2738+ * returns 1 when task should be killed
2739+ */
2740+static int pax_handle_fetch_fault(struct pt_regs *regs)
2741+{
2742+ return 1;
2743+}
2744+
2745+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746+{
2747+ unsigned long i;
2748+
2749+ printk(KERN_ERR "PAX: bytes at PC: ");
2750+ for (i = 0; i < 5; i++) {
2751+ unsigned int c;
2752+ if (get_user(c, (unsigned int __user *)pc+i))
2753+ printk(KERN_CONT "???????? ");
2754+ else
2755+ printk(KERN_CONT "%08x ", c);
2756+ }
2757+ printk("\n");
2758+}
2759+#endif
2760+
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768- error_code &= 0x48200000;
2769+ error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773@@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777- if (error_code & 0x10000000)
2778+ if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782@@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786- if (error_code & DSISR_PROTFAULT)
2787+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791@@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795+
2796+#ifdef CONFIG_PAX_PAGEEXEC
2797+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798+#ifdef CONFIG_PPC_STD_MMU
2799+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800+#else
2801+ if (is_exec && regs->nip == address) {
2802+#endif
2803+ switch (pax_handle_fetch_fault(regs)) {
2804+ }
2805+
2806+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807+ do_group_exit(SIGKILL);
2808+ }
2809+ }
2810+#endif
2811+
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816index 5a783d8..c23e14b 100644
2817--- a/arch/powerpc/mm/mmap_64.c
2818+++ b/arch/powerpc/mm/mmap_64.c
2819@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823+
2824+#ifdef CONFIG_PAX_RANDMMAP
2825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2826+ mm->mmap_base += mm->delta_mmap;
2827+#endif
2828+
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833+
2834+#ifdef CONFIG_PAX_RANDMMAP
2835+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2836+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837+#endif
2838+
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843index 73709f7..6b90313 100644
2844--- a/arch/powerpc/mm/slice.c
2845+++ b/arch/powerpc/mm/slice.c
2846@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850- return (!vma || (addr + len) <= vma->vm_start);
2851+ return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855@@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859- if (!vma || addr + len <= vma->vm_start) {
2860+ if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868- addr = mm->mmap_base;
2869- while (addr > len) {
2870+ if (mm->mmap_base < len)
2871+ addr = -ENOMEM;
2872+ else
2873+ addr = mm->mmap_base - len;
2874+
2875+ while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886- if (!vma || (addr + len) <= vma->vm_start) {
2887+ if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895- addr = vma->vm_start;
2896+ addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904+#ifdef CONFIG_PAX_RANDMMAP
2905+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906+ addr = 0;
2907+#endif
2908+
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913index 547f1a6..3fff354 100644
2914--- a/arch/s390/include/asm/elf.h
2915+++ b/arch/s390/include/asm/elf.h
2916@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920-extern unsigned long randomize_et_dyn(unsigned long base);
2921-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923+
2924+#ifdef CONFIG_PAX_ASLR
2925+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926+
2927+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2928+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929+#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933@@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938-#define arch_randomize_brk arch_randomize_brk
2939-
2940 #endif
2941diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942index ef573c1..75a1ce6 100644
2943--- a/arch/s390/include/asm/system.h
2944+++ b/arch/s390/include/asm/system.h
2945@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949-extern unsigned long arch_align_stack(unsigned long sp);
2950+#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955index 2b23885..e136e31 100644
2956--- a/arch/s390/include/asm/uaccess.h
2957+++ b/arch/s390/include/asm/uaccess.h
2958@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962+
2963+ if ((long)n < 0)
2964+ return n;
2965+
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973+ if ((long)n < 0)
2974+ return n;
2975+
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983+
2984+ if ((long)n < 0)
2985+ return n;
2986+
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991index dfcb343..eda788a 100644
2992--- a/arch/s390/kernel/module.c
2993+++ b/arch/s390/kernel/module.c
2994@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998- me->core_size = ALIGN(me->core_size, 4);
2999- me->arch.got_offset = me->core_size;
3000- me->core_size += me->arch.got_size;
3001- me->arch.plt_offset = me->core_size;
3002- me->core_size += me->arch.plt_size;
3003+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004+ me->arch.got_offset = me->core_size_rw;
3005+ me->core_size_rw += me->arch.got_size;
3006+ me->arch.plt_offset = me->core_size_rx;
3007+ me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015- gotent = me->module_core + me->arch.got_offset +
3016+ gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024- (val + (Elf_Addr) me->module_core - loc) >> 1;
3025+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033- ip = me->module_core + me->arch.plt_offset +
3034+ ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042- val = (Elf_Addr) me->module_core +
3043+ val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051- ((Elf_Addr) me->module_core + me->arch.got_offset);
3052+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066index 9451b21..ed8956f 100644
3067--- a/arch/s390/kernel/process.c
3068+++ b/arch/s390/kernel/process.c
3069@@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073-
3074-unsigned long arch_align_stack(unsigned long sp)
3075-{
3076- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077- sp -= get_random_int() & ~PAGE_MASK;
3078- return sp & ~0xf;
3079-}
3080-
3081-static inline unsigned long brk_rnd(void)
3082-{
3083- /* 8MB for 32bit, 1GB for 64bit */
3084- if (is_32bit_task())
3085- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086- else
3087- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088-}
3089-
3090-unsigned long arch_randomize_brk(struct mm_struct *mm)
3091-{
3092- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093-
3094- if (ret < mm->brk)
3095- return mm->brk;
3096- return ret;
3097-}
3098-
3099-unsigned long randomize_et_dyn(unsigned long base)
3100-{
3101- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102-
3103- if (!(current->flags & PF_RANDOMIZE))
3104- return base;
3105- if (ret < base)
3106- return base;
3107- return ret;
3108-}
3109diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110index f09c748..cf9ec1d 100644
3111--- a/arch/s390/mm/mmap.c
3112+++ b/arch/s390/mm/mmap.c
3113@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117+
3118+#ifdef CONFIG_PAX_RANDMMAP
3119+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3120+ mm->mmap_base += mm->delta_mmap;
3121+#endif
3122+
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127+
3128+#ifdef CONFIG_PAX_RANDMMAP
3129+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3130+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131+#endif
3132+
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140+
3141+#ifdef CONFIG_PAX_RANDMMAP
3142+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3143+ mm->mmap_base += mm->delta_mmap;
3144+#endif
3145+
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150+
3151+#ifdef CONFIG_PAX_RANDMMAP
3152+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3153+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154+#endif
3155+
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160index 589d5c7..669e274 100644
3161--- a/arch/score/include/asm/system.h
3162+++ b/arch/score/include/asm/system.h
3163@@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167-extern unsigned long arch_align_stack(unsigned long sp);
3168+#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173index 25d0803..d6c8e36 100644
3174--- a/arch/score/kernel/process.c
3175+++ b/arch/score/kernel/process.c
3176@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180-
3181-unsigned long arch_align_stack(unsigned long sp)
3182-{
3183- return sp;
3184-}
3185diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186index afeb710..d1d1289 100644
3187--- a/arch/sh/mm/mmap.c
3188+++ b/arch/sh/mm/mmap.c
3189@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193- if (TASK_SIZE - len >= addr &&
3194- (!vma || addr + len <= vma->vm_start))
3195+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199@@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203- if (likely(!vma || addr + len <= vma->vm_start)) {
3204+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212- if (TASK_SIZE - len >= addr &&
3213- (!vma || addr + len <= vma->vm_start))
3214+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222- if (!vma || addr <= vma->vm_start) {
3223+ if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231- addr = mm->mmap_base-len;
3232- if (do_colour_align)
3233- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234+ addr = mm->mmap_base - len;
3235
3236 do {
3237+ if (do_colour_align)
3238+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245- if (likely(!vma || addr+len <= vma->vm_start)) {
3246+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254- addr = vma->vm_start-len;
3255- if (do_colour_align)
3256- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257- } while (likely(len < vma->vm_start));
3258+ addr = skip_heap_stack_gap(vma, len);
3259+ } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264index ad1fb5d..fc5315b 100644
3265--- a/arch/sparc/Makefile
3266+++ b/arch/sparc/Makefile
3267@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277index 9f421df..b81fc12 100644
3278--- a/arch/sparc/include/asm/atomic_64.h
3279+++ b/arch/sparc/include/asm/atomic_64.h
3280@@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285+{
3286+ return v->counter;
3287+}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290+{
3291+ return v->counter;
3292+}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296+{
3297+ v->counter = i;
3298+}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301+{
3302+ v->counter = i;
3303+}
3304
3305 extern void atomic_add(int, atomic_t *);
3306+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326+{
3327+ return atomic_add_ret_unchecked(1, v);
3328+}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331+{
3332+ return atomic64_add_ret_unchecked(1, v);
3333+}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340+{
3341+ return atomic_add_ret_unchecked(i, v);
3342+}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345+{
3346+ return atomic64_add_ret_unchecked(i, v);
3347+}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356+{
3357+ return atomic_inc_return_unchecked(v) == 0;
3358+}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367+{
3368+ atomic_add_unchecked(1, v);
3369+}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372+{
3373+ atomic64_add_unchecked(1, v);
3374+}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378+{
3379+ atomic_sub_unchecked(1, v);
3380+}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383+{
3384+ atomic64_sub_unchecked(1, v);
3385+}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392+{
3393+ return cmpxchg(&v->counter, old, new);
3394+}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397+{
3398+ return xchg(&v->counter, new);
3399+}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403- int c, old;
3404+ int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407- if (unlikely(c == (u)))
3408+ if (unlikely(c == u))
3409 break;
3410- old = atomic_cmpxchg((v), c, c + (a));
3411+
3412+ asm volatile("addcc %2, %0, %0\n"
3413+
3414+#ifdef CONFIG_PAX_REFCOUNT
3415+ "tvs %%icc, 6\n"
3416+#endif
3417+
3418+ : "=r" (new)
3419+ : "0" (c), "ir" (a)
3420+ : "cc");
3421+
3422+ old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431+{
3432+ return xchg(&v->counter, new);
3433+}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437- long c, old;
3438+ long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441- if (unlikely(c == (u)))
3442+ if (unlikely(c == u))
3443 break;
3444- old = atomic64_cmpxchg((v), c, c + (a));
3445+
3446+ asm volatile("addcc %2, %0, %0\n"
3447+
3448+#ifdef CONFIG_PAX_REFCOUNT
3449+ "tvs %%xcc, 6\n"
3450+#endif
3451+
3452+ : "=r" (new)
3453+ : "0" (c), "ir" (a)
3454+ : "cc");
3455+
3456+ old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461- return c != (u);
3462+ return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467index 69358b5..17b4745 100644
3468--- a/arch/sparc/include/asm/cache.h
3469+++ b/arch/sparc/include/asm/cache.h
3470@@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474-#define L1_CACHE_BYTES 32
3475+#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480index 4269ca6..e3da77f 100644
3481--- a/arch/sparc/include/asm/elf_32.h
3482+++ b/arch/sparc/include/asm/elf_32.h
3483@@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487+#ifdef CONFIG_PAX_ASLR
3488+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489+
3490+#define PAX_DELTA_MMAP_LEN 16
3491+#define PAX_DELTA_STACK_LEN 16
3492+#endif
3493+
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498index 7df8b7f..4946269 100644
3499--- a/arch/sparc/include/asm/elf_64.h
3500+++ b/arch/sparc/include/asm/elf_64.h
3501@@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505+#ifdef CONFIG_PAX_ASLR
3506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507+
3508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510+#endif
3511+
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516index a790cc6..091ed94 100644
3517--- a/arch/sparc/include/asm/pgtable_32.h
3518+++ b/arch/sparc/include/asm/pgtable_32.h
3519@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523+
3524+#ifdef CONFIG_PAX_PAGEEXEC
3525+BTFIXUPDEF_INT(page_shared_noexec)
3526+BTFIXUPDEF_INT(page_copy_noexec)
3527+BTFIXUPDEF_INT(page_readonly_noexec)
3528+#endif
3529+
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537+#ifdef CONFIG_PAX_PAGEEXEC
3538+extern pgprot_t PAGE_SHARED_NOEXEC;
3539+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541+#else
3542+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543+# define PAGE_COPY_NOEXEC PAGE_COPY
3544+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545+#endif
3546+
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551index f6ae2b2..b03ffc7 100644
3552--- a/arch/sparc/include/asm/pgtsrmmu.h
3553+++ b/arch/sparc/include/asm/pgtsrmmu.h
3554@@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558+
3559+#ifdef CONFIG_PAX_PAGEEXEC
3560+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563+#endif
3564+
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569index 9689176..63c18ea 100644
3570--- a/arch/sparc/include/asm/spinlock_64.h
3571+++ b/arch/sparc/include/asm/spinlock_64.h
3572@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576-static void inline arch_read_lock(arch_rwlock_t *lock)
3577+static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584-"4: add %0, 1, %1\n"
3585+"4: addcc %0, 1, %1\n"
3586+
3587+#ifdef CONFIG_PAX_REFCOUNT
3588+" tvs %%icc, 6\n"
3589+#endif
3590+
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598- : "memory");
3599+ : "memory", "cc");
3600 }
3601
3602-static int inline arch_read_trylock(arch_rwlock_t *lock)
3603+static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611-" add %0, 1, %1\n"
3612+" addcc %0, 1, %1\n"
3613+
3614+#ifdef CONFIG_PAX_REFCOUNT
3615+" tvs %%icc, 6\n"
3616+#endif
3617+
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625-static void inline arch_read_unlock(arch_rwlock_t *lock)
3626+static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632-" sub %0, 1, %1\n"
3633+" subcc %0, 1, %1\n"
3634+
3635+#ifdef CONFIG_PAX_REFCOUNT
3636+" tvs %%icc, 6\n"
3637+#endif
3638+
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646-static void inline arch_write_lock(arch_rwlock_t *lock)
3647+static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655-static void inline arch_write_unlock(arch_rwlock_t *lock)
3656+static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664-static int inline arch_write_trylock(arch_rwlock_t *lock)
3665+static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670index fa57532..e1a4c53 100644
3671--- a/arch/sparc/include/asm/thread_info_32.h
3672+++ b/arch/sparc/include/asm/thread_info_32.h
3673@@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677+
3678+ unsigned long lowest_stack;
3679 };
3680
3681 /*
3682diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683index 60d86be..952dea1 100644
3684--- a/arch/sparc/include/asm/thread_info_64.h
3685+++ b/arch/sparc/include/asm/thread_info_64.h
3686@@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690+ unsigned long lowest_stack;
3691+
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696index e88fbe5..96b0ce5 100644
3697--- a/arch/sparc/include/asm/uaccess.h
3698+++ b/arch/sparc/include/asm/uaccess.h
3699@@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702+
3703+#ifdef __KERNEL__
3704+#ifndef __ASSEMBLY__
3705+#include <linux/types.h>
3706+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707+#endif
3708+#endif
3709+
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714index 8303ac4..07f333d 100644
3715--- a/arch/sparc/include/asm/uaccess_32.h
3716+++ b/arch/sparc/include/asm/uaccess_32.h
3717@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721- if (n && __access_ok((unsigned long) to, n))
3722+ if ((long)n < 0)
3723+ return n;
3724+
3725+ if (n && __access_ok((unsigned long) to, n)) {
3726+ if (!__builtin_constant_p(n))
3727+ check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729- else
3730+ } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736+ if ((long)n < 0)
3737+ return n;
3738+
3739+ if (!__builtin_constant_p(n))
3740+ check_object_size(from, n, true);
3741+
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747- if (n && __access_ok((unsigned long) from, n))
3748+ if ((long)n < 0)
3749+ return n;
3750+
3751+ if (n && __access_ok((unsigned long) from, n)) {
3752+ if (!__builtin_constant_p(n))
3753+ check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755- else
3756+ } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762+ if ((long)n < 0)
3763+ return n;
3764+
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769index 3e1449f..5293a0e 100644
3770--- a/arch/sparc/include/asm/uaccess_64.h
3771+++ b/arch/sparc/include/asm/uaccess_64.h
3772@@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776+#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784- unsigned long ret = ___copy_from_user(to, from, size);
3785+ unsigned long ret;
3786
3787+ if ((long)size < 0 || size > INT_MAX)
3788+ return size;
3789+
3790+ if (!__builtin_constant_p(size))
3791+ check_object_size(to, size, false);
3792+
3793+ ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801- unsigned long ret = ___copy_to_user(to, from, size);
3802+ unsigned long ret;
3803
3804+ if ((long)size < 0 || size > INT_MAX)
3805+ return size;
3806+
3807+ if (!__builtin_constant_p(size))
3808+ check_object_size(from, size, true);
3809+
3810+ ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815index cb85458..e063f17 100644
3816--- a/arch/sparc/kernel/Makefile
3817+++ b/arch/sparc/kernel/Makefile
3818@@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822-ccflags-y := -Werror
3823+#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828index f793742..4d880af 100644
3829--- a/arch/sparc/kernel/process_32.c
3830+++ b/arch/sparc/kernel/process_32.c
3831@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835- printk("%pS\n", (void *) rw->ins[7]);
3836+ printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844- printk("PC: <%pS>\n", (void *) r->pc);
3845+ printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861- printk("%pS ] ", (void *) pc);
3862+ printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867index 3739a06..48b2ff0 100644
3868--- a/arch/sparc/kernel/process_64.c
3869+++ b/arch/sparc/kernel/process_64.c
3870@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882- printk("TPC: <%pS>\n", (void *) regs->tpc);
3883+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906index 42b282f..28ce9f2 100644
3907--- a/arch/sparc/kernel/sys_sparc_32.c
3908+++ b/arch/sparc/kernel/sys_sparc_32.c
3909@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913- addr = TASK_UNMAPPED_BASE;
3914+ addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922- if (!vmm || addr + len <= vmm->vm_start)
3923+ if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928index 441521a..b767073 100644
3929--- a/arch/sparc/kernel/sys_sparc_64.c
3930+++ b/arch/sparc/kernel/sys_sparc_64.c
3931@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935- if ((flags & MAP_SHARED) &&
3936+ if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944+#ifdef CONFIG_PAX_RANDMMAP
3945+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946+#endif
3947+
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955- if (task_size - len >= addr &&
3956- (!vma || addr + len <= vma->vm_start))
3957+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962- start_addr = addr = mm->free_area_cache;
3963+ start_addr = addr = mm->free_area_cache;
3964 } else {
3965- start_addr = addr = TASK_UNMAPPED_BASE;
3966+ start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970@@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974- if (start_addr != TASK_UNMAPPED_BASE) {
3975- start_addr = addr = TASK_UNMAPPED_BASE;
3976+ if (start_addr != mm->mmap_base) {
3977+ start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983- if (likely(!vma || addr + len <= vma->vm_start)) {
3984+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992- if ((flags & MAP_SHARED) &&
3993+ if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001- if (task_size - len >= addr &&
4002- (!vma || addr + len <= vma->vm_start))
4003+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011- if (!vma || addr <= vma->vm_start) {
4012+ if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020- addr = mm->mmap_base-len;
4021- if (do_color_align)
4022- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023+ addr = mm->mmap_base - len;
4024
4025 do {
4026+ if (do_color_align)
4027+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034- if (likely(!vma || addr+len <= vma->vm_start)) {
4035+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043- addr = vma->vm_start-len;
4044- if (do_color_align)
4045- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046- } while (likely(len < vma->vm_start));
4047+ addr = skip_heap_stack_gap(vma, len);
4048+ } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069+
4070+#ifdef CONFIG_PAX_RANDMMAP
4071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4072+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073+#endif
4074+
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079index 591f20c..0f1b925 100644
4080--- a/arch/sparc/kernel/traps_32.c
4081+++ b/arch/sparc/kernel/traps_32.c
4082@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086+extern void gr_handle_kernel_exploit(void);
4087+
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103- if(regs->psr & PSR_PS)
4104+ if(regs->psr & PSR_PS) {
4105+ gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107+ }
4108 do_exit(SIGSEGV);
4109 }
4110
4111diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112index 0cbdaa4..438e4c9 100644
4113--- a/arch/sparc/kernel/traps_64.c
4114+++ b/arch/sparc/kernel/traps_64.c
4115@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128+
4129+#ifdef CONFIG_PAX_REFCOUNT
4130+ if (lvl == 6)
4131+ pax_report_refcount_overflow(regs);
4132+#endif
4133+
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141-
4142+
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147+#ifdef CONFIG_PAX_REFCOUNT
4148+ if (lvl == 6)
4149+ pax_report_refcount_overflow(regs);
4150+#endif
4151+
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159- printk("TPC<%pS>\n", (void *) regs->tpc);
4160+ printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210- printk(" [%016lx] %pS\n", pc, (void *) pc);
4211+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217- printk(" [%016lx] %pS\n", pc, (void *) pc);
4218+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226+extern void gr_handle_kernel_exploit(void);
4227+
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244- if (regs->tstate & TSTATE_PRIV)
4245+ if (regs->tstate & TSTATE_PRIV) {
4246+ gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248+ }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253index 76e4ac1..78f8bb1 100644
4254--- a/arch/sparc/kernel/unaligned_64.c
4255+++ b/arch/sparc/kernel/unaligned_64.c
4256@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266index a3fc437..fea9957 100644
4267--- a/arch/sparc/lib/Makefile
4268+++ b/arch/sparc/lib/Makefile
4269@@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273-ccflags-y := -Werror
4274+#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279index 59186e0..f747d7a 100644
4280--- a/arch/sparc/lib/atomic_64.S
4281+++ b/arch/sparc/lib/atomic_64.S
4282@@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286- add %g1, %o0, %g7
4287+ addcc %g1, %o0, %g7
4288+
4289+#ifdef CONFIG_PAX_REFCOUNT
4290+ tvs %icc, 6
4291+#endif
4292+
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300+ .globl atomic_add_unchecked
4301+ .type atomic_add_unchecked,#function
4302+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303+ BACKOFF_SETUP(%o2)
4304+1: lduw [%o1], %g1
4305+ add %g1, %o0, %g7
4306+ cas [%o1], %g1, %g7
4307+ cmp %g1, %g7
4308+ bne,pn %icc, 2f
4309+ nop
4310+ retl
4311+ nop
4312+2: BACKOFF_SPIN(%o2, %o3, 1b)
4313+ .size atomic_add_unchecked, .-atomic_add_unchecked
4314+
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320- sub %g1, %o0, %g7
4321+ subcc %g1, %o0, %g7
4322+
4323+#ifdef CONFIG_PAX_REFCOUNT
4324+ tvs %icc, 6
4325+#endif
4326+
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334+ .globl atomic_sub_unchecked
4335+ .type atomic_sub_unchecked,#function
4336+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337+ BACKOFF_SETUP(%o2)
4338+1: lduw [%o1], %g1
4339+ sub %g1, %o0, %g7
4340+ cas [%o1], %g1, %g7
4341+ cmp %g1, %g7
4342+ bne,pn %icc, 2f
4343+ nop
4344+ retl
4345+ nop
4346+2: BACKOFF_SPIN(%o2, %o3, 1b)
4347+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348+
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354- add %g1, %o0, %g7
4355+ addcc %g1, %o0, %g7
4356+
4357+#ifdef CONFIG_PAX_REFCOUNT
4358+ tvs %icc, 6
4359+#endif
4360+
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368+ .globl atomic_add_ret_unchecked
4369+ .type atomic_add_ret_unchecked,#function
4370+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371+ BACKOFF_SETUP(%o2)
4372+1: lduw [%o1], %g1
4373+ addcc %g1, %o0, %g7
4374+ cas [%o1], %g1, %g7
4375+ cmp %g1, %g7
4376+ bne,pn %icc, 2f
4377+ add %g7, %o0, %g7
4378+ sra %g7, 0, %o0
4379+ retl
4380+ nop
4381+2: BACKOFF_SPIN(%o2, %o3, 1b)
4382+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383+
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389- sub %g1, %o0, %g7
4390+ subcc %g1, %o0, %g7
4391+
4392+#ifdef CONFIG_PAX_REFCOUNT
4393+ tvs %icc, 6
4394+#endif
4395+
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403- add %g1, %o0, %g7
4404+ addcc %g1, %o0, %g7
4405+
4406+#ifdef CONFIG_PAX_REFCOUNT
4407+ tvs %xcc, 6
4408+#endif
4409+
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417+ .globl atomic64_add_unchecked
4418+ .type atomic64_add_unchecked,#function
4419+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420+ BACKOFF_SETUP(%o2)
4421+1: ldx [%o1], %g1
4422+ addcc %g1, %o0, %g7
4423+ casx [%o1], %g1, %g7
4424+ cmp %g1, %g7
4425+ bne,pn %xcc, 2f
4426+ nop
4427+ retl
4428+ nop
4429+2: BACKOFF_SPIN(%o2, %o3, 1b)
4430+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431+
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437- sub %g1, %o0, %g7
4438+ subcc %g1, %o0, %g7
4439+
4440+#ifdef CONFIG_PAX_REFCOUNT
4441+ tvs %xcc, 6
4442+#endif
4443+
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451+ .globl atomic64_sub_unchecked
4452+ .type atomic64_sub_unchecked,#function
4453+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454+ BACKOFF_SETUP(%o2)
4455+1: ldx [%o1], %g1
4456+ subcc %g1, %o0, %g7
4457+ casx [%o1], %g1, %g7
4458+ cmp %g1, %g7
4459+ bne,pn %xcc, 2f
4460+ nop
4461+ retl
4462+ nop
4463+2: BACKOFF_SPIN(%o2, %o3, 1b)
4464+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465+
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471- add %g1, %o0, %g7
4472+ addcc %g1, %o0, %g7
4473+
4474+#ifdef CONFIG_PAX_REFCOUNT
4475+ tvs %xcc, 6
4476+#endif
4477+
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485+ .globl atomic64_add_ret_unchecked
4486+ .type atomic64_add_ret_unchecked,#function
4487+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488+ BACKOFF_SETUP(%o2)
4489+1: ldx [%o1], %g1
4490+ addcc %g1, %o0, %g7
4491+ casx [%o1], %g1, %g7
4492+ cmp %g1, %g7
4493+ bne,pn %xcc, 2f
4494+ add %g7, %o0, %g7
4495+ mov %g7, %o0
4496+ retl
4497+ nop
4498+2: BACKOFF_SPIN(%o2, %o3, 1b)
4499+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500+
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506- sub %g1, %o0, %g7
4507+ subcc %g1, %o0, %g7
4508+
4509+#ifdef CONFIG_PAX_REFCOUNT
4510+ tvs %xcc, 6
4511+#endif
4512+
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517index 1b30bb3..b4a16c7 100644
4518--- a/arch/sparc/lib/ksyms.c
4519+++ b/arch/sparc/lib/ksyms.c
4520@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524+EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528+EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531+EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535+EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540index 301421c..e2535d1 100644
4541--- a/arch/sparc/mm/Makefile
4542+++ b/arch/sparc/mm/Makefile
4543@@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547-ccflags-y := -Werror
4548+#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553index 8023fd7..c8e89e9 100644
4554--- a/arch/sparc/mm/fault_32.c
4555+++ b/arch/sparc/mm/fault_32.c
4556@@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560+#include <linux/slab.h>
4561+#include <linux/pagemap.h>
4562+#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570+#ifdef CONFIG_PAX_PAGEEXEC
4571+#ifdef CONFIG_PAX_DLRESOLVE
4572+static void pax_emuplt_close(struct vm_area_struct *vma)
4573+{
4574+ vma->vm_mm->call_dl_resolve = 0UL;
4575+}
4576+
4577+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578+{
4579+ unsigned int *kaddr;
4580+
4581+ vmf->page = alloc_page(GFP_HIGHUSER);
4582+ if (!vmf->page)
4583+ return VM_FAULT_OOM;
4584+
4585+ kaddr = kmap(vmf->page);
4586+ memset(kaddr, 0, PAGE_SIZE);
4587+ kaddr[0] = 0x9DE3BFA8U; /* save */
4588+ flush_dcache_page(vmf->page);
4589+ kunmap(vmf->page);
4590+ return VM_FAULT_MAJOR;
4591+}
4592+
4593+static const struct vm_operations_struct pax_vm_ops = {
4594+ .close = pax_emuplt_close,
4595+ .fault = pax_emuplt_fault
4596+};
4597+
4598+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599+{
4600+ int ret;
4601+
4602+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4603+ vma->vm_mm = current->mm;
4604+ vma->vm_start = addr;
4605+ vma->vm_end = addr + PAGE_SIZE;
4606+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608+ vma->vm_ops = &pax_vm_ops;
4609+
4610+ ret = insert_vm_struct(current->mm, vma);
4611+ if (ret)
4612+ return ret;
4613+
4614+ ++current->mm->total_vm;
4615+ return 0;
4616+}
4617+#endif
4618+
4619+/*
4620+ * PaX: decide what to do with offenders (regs->pc = fault address)
4621+ *
4622+ * returns 1 when task should be killed
4623+ * 2 when patched PLT trampoline was detected
4624+ * 3 when unpatched PLT trampoline was detected
4625+ */
4626+static int pax_handle_fetch_fault(struct pt_regs *regs)
4627+{
4628+
4629+#ifdef CONFIG_PAX_EMUPLT
4630+ int err;
4631+
4632+ do { /* PaX: patched PLT emulation #1 */
4633+ unsigned int sethi1, sethi2, jmpl;
4634+
4635+ err = get_user(sethi1, (unsigned int *)regs->pc);
4636+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638+
4639+ if (err)
4640+ break;
4641+
4642+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645+ {
4646+ unsigned int addr;
4647+
4648+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649+ addr = regs->u_regs[UREG_G1];
4650+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651+ regs->pc = addr;
4652+ regs->npc = addr+4;
4653+ return 2;
4654+ }
4655+ } while (0);
4656+
4657+ { /* PaX: patched PLT emulation #2 */
4658+ unsigned int ba;
4659+
4660+ err = get_user(ba, (unsigned int *)regs->pc);
4661+
4662+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663+ unsigned int addr;
4664+
4665+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666+ regs->pc = addr;
4667+ regs->npc = addr+4;
4668+ return 2;
4669+ }
4670+ }
4671+
4672+ do { /* PaX: patched PLT emulation #3 */
4673+ unsigned int sethi, jmpl, nop;
4674+
4675+ err = get_user(sethi, (unsigned int *)regs->pc);
4676+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678+
4679+ if (err)
4680+ break;
4681+
4682+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684+ nop == 0x01000000U)
4685+ {
4686+ unsigned int addr;
4687+
4688+ addr = (sethi & 0x003FFFFFU) << 10;
4689+ regs->u_regs[UREG_G1] = addr;
4690+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691+ regs->pc = addr;
4692+ regs->npc = addr+4;
4693+ return 2;
4694+ }
4695+ } while (0);
4696+
4697+ do { /* PaX: unpatched PLT emulation step 1 */
4698+ unsigned int sethi, ba, nop;
4699+
4700+ err = get_user(sethi, (unsigned int *)regs->pc);
4701+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703+
4704+ if (err)
4705+ break;
4706+
4707+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709+ nop == 0x01000000U)
4710+ {
4711+ unsigned int addr, save, call;
4712+
4713+ if ((ba & 0xFFC00000U) == 0x30800000U)
4714+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715+ else
4716+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717+
4718+ err = get_user(save, (unsigned int *)addr);
4719+ err |= get_user(call, (unsigned int *)(addr+4));
4720+ err |= get_user(nop, (unsigned int *)(addr+8));
4721+ if (err)
4722+ break;
4723+
4724+#ifdef CONFIG_PAX_DLRESOLVE
4725+ if (save == 0x9DE3BFA8U &&
4726+ (call & 0xC0000000U) == 0x40000000U &&
4727+ nop == 0x01000000U)
4728+ {
4729+ struct vm_area_struct *vma;
4730+ unsigned long call_dl_resolve;
4731+
4732+ down_read(&current->mm->mmap_sem);
4733+ call_dl_resolve = current->mm->call_dl_resolve;
4734+ up_read(&current->mm->mmap_sem);
4735+ if (likely(call_dl_resolve))
4736+ goto emulate;
4737+
4738+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739+
4740+ down_write(&current->mm->mmap_sem);
4741+ if (current->mm->call_dl_resolve) {
4742+ call_dl_resolve = current->mm->call_dl_resolve;
4743+ up_write(&current->mm->mmap_sem);
4744+ if (vma)
4745+ kmem_cache_free(vm_area_cachep, vma);
4746+ goto emulate;
4747+ }
4748+
4749+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751+ up_write(&current->mm->mmap_sem);
4752+ if (vma)
4753+ kmem_cache_free(vm_area_cachep, vma);
4754+ return 1;
4755+ }
4756+
4757+ if (pax_insert_vma(vma, call_dl_resolve)) {
4758+ up_write(&current->mm->mmap_sem);
4759+ kmem_cache_free(vm_area_cachep, vma);
4760+ return 1;
4761+ }
4762+
4763+ current->mm->call_dl_resolve = call_dl_resolve;
4764+ up_write(&current->mm->mmap_sem);
4765+
4766+emulate:
4767+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768+ regs->pc = call_dl_resolve;
4769+ regs->npc = addr+4;
4770+ return 3;
4771+ }
4772+#endif
4773+
4774+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775+ if ((save & 0xFFC00000U) == 0x05000000U &&
4776+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4777+ nop == 0x01000000U)
4778+ {
4779+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780+ regs->u_regs[UREG_G2] = addr + 4;
4781+ addr = (save & 0x003FFFFFU) << 10;
4782+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783+ regs->pc = addr;
4784+ regs->npc = addr+4;
4785+ return 3;
4786+ }
4787+ }
4788+ } while (0);
4789+
4790+ do { /* PaX: unpatched PLT emulation step 2 */
4791+ unsigned int save, call, nop;
4792+
4793+ err = get_user(save, (unsigned int *)(regs->pc-4));
4794+ err |= get_user(call, (unsigned int *)regs->pc);
4795+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796+ if (err)
4797+ break;
4798+
4799+ if (save == 0x9DE3BFA8U &&
4800+ (call & 0xC0000000U) == 0x40000000U &&
4801+ nop == 0x01000000U)
4802+ {
4803+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804+
4805+ regs->u_regs[UREG_RETPC] = regs->pc;
4806+ regs->pc = dl_resolve;
4807+ regs->npc = dl_resolve+4;
4808+ return 3;
4809+ }
4810+ } while (0);
4811+#endif
4812+
4813+ return 1;
4814+}
4815+
4816+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817+{
4818+ unsigned long i;
4819+
4820+ printk(KERN_ERR "PAX: bytes at PC: ");
4821+ for (i = 0; i < 8; i++) {
4822+ unsigned int c;
4823+ if (get_user(c, (unsigned int *)pc+i))
4824+ printk(KERN_CONT "???????? ");
4825+ else
4826+ printk(KERN_CONT "%08x ", c);
4827+ }
4828+ printk("\n");
4829+}
4830+#endif
4831+
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835@@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839+
4840+#ifdef CONFIG_PAX_PAGEEXEC
4841+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842+ up_read(&mm->mmap_sem);
4843+ switch (pax_handle_fetch_fault(regs)) {
4844+
4845+#ifdef CONFIG_PAX_EMUPLT
4846+ case 2:
4847+ case 3:
4848+ return;
4849+#endif
4850+
4851+ }
4852+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853+ do_group_exit(SIGKILL);
4854+ }
4855+#endif
4856+
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861index 504c062..6fcb9c6 100644
4862--- a/arch/sparc/mm/fault_64.c
4863+++ b/arch/sparc/mm/fault_64.c
4864@@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868+#include <linux/slab.h>
4869+#include <linux/pagemap.h>
4870+#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887+#ifdef CONFIG_PAX_PAGEEXEC
4888+#ifdef CONFIG_PAX_DLRESOLVE
4889+static void pax_emuplt_close(struct vm_area_struct *vma)
4890+{
4891+ vma->vm_mm->call_dl_resolve = 0UL;
4892+}
4893+
4894+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895+{
4896+ unsigned int *kaddr;
4897+
4898+ vmf->page = alloc_page(GFP_HIGHUSER);
4899+ if (!vmf->page)
4900+ return VM_FAULT_OOM;
4901+
4902+ kaddr = kmap(vmf->page);
4903+ memset(kaddr, 0, PAGE_SIZE);
4904+ kaddr[0] = 0x9DE3BFA8U; /* save */
4905+ flush_dcache_page(vmf->page);
4906+ kunmap(vmf->page);
4907+ return VM_FAULT_MAJOR;
4908+}
4909+
4910+static const struct vm_operations_struct pax_vm_ops = {
4911+ .close = pax_emuplt_close,
4912+ .fault = pax_emuplt_fault
4913+};
4914+
4915+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916+{
4917+ int ret;
4918+
4919+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4920+ vma->vm_mm = current->mm;
4921+ vma->vm_start = addr;
4922+ vma->vm_end = addr + PAGE_SIZE;
4923+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925+ vma->vm_ops = &pax_vm_ops;
4926+
4927+ ret = insert_vm_struct(current->mm, vma);
4928+ if (ret)
4929+ return ret;
4930+
4931+ ++current->mm->total_vm;
4932+ return 0;
4933+}
4934+#endif
4935+
4936+/*
4937+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4938+ *
4939+ * returns 1 when task should be killed
4940+ * 2 when patched PLT trampoline was detected
4941+ * 3 when unpatched PLT trampoline was detected
4942+ */
4943+static int pax_handle_fetch_fault(struct pt_regs *regs)
4944+{
4945+
4946+#ifdef CONFIG_PAX_EMUPLT
4947+ int err;
4948+
4949+ do { /* PaX: patched PLT emulation #1 */
4950+ unsigned int sethi1, sethi2, jmpl;
4951+
4952+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4953+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955+
4956+ if (err)
4957+ break;
4958+
4959+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962+ {
4963+ unsigned long addr;
4964+
4965+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966+ addr = regs->u_regs[UREG_G1];
4967+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968+
4969+ if (test_thread_flag(TIF_32BIT))
4970+ addr &= 0xFFFFFFFFUL;
4971+
4972+ regs->tpc = addr;
4973+ regs->tnpc = addr+4;
4974+ return 2;
4975+ }
4976+ } while (0);
4977+
4978+ { /* PaX: patched PLT emulation #2 */
4979+ unsigned int ba;
4980+
4981+ err = get_user(ba, (unsigned int *)regs->tpc);
4982+
4983+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984+ unsigned long addr;
4985+
4986+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987+
4988+ if (test_thread_flag(TIF_32BIT))
4989+ addr &= 0xFFFFFFFFUL;
4990+
4991+ regs->tpc = addr;
4992+ regs->tnpc = addr+4;
4993+ return 2;
4994+ }
4995+ }
4996+
4997+ do { /* PaX: patched PLT emulation #3 */
4998+ unsigned int sethi, jmpl, nop;
4999+
5000+ err = get_user(sethi, (unsigned int *)regs->tpc);
5001+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003+
5004+ if (err)
5005+ break;
5006+
5007+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009+ nop == 0x01000000U)
5010+ {
5011+ unsigned long addr;
5012+
5013+ addr = (sethi & 0x003FFFFFU) << 10;
5014+ regs->u_regs[UREG_G1] = addr;
5015+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016+
5017+ if (test_thread_flag(TIF_32BIT))
5018+ addr &= 0xFFFFFFFFUL;
5019+
5020+ regs->tpc = addr;
5021+ regs->tnpc = addr+4;
5022+ return 2;
5023+ }
5024+ } while (0);
5025+
5026+ do { /* PaX: patched PLT emulation #4 */
5027+ unsigned int sethi, mov1, call, mov2;
5028+
5029+ err = get_user(sethi, (unsigned int *)regs->tpc);
5030+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033+
5034+ if (err)
5035+ break;
5036+
5037+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038+ mov1 == 0x8210000FU &&
5039+ (call & 0xC0000000U) == 0x40000000U &&
5040+ mov2 == 0x9E100001U)
5041+ {
5042+ unsigned long addr;
5043+
5044+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046+
5047+ if (test_thread_flag(TIF_32BIT))
5048+ addr &= 0xFFFFFFFFUL;
5049+
5050+ regs->tpc = addr;
5051+ regs->tnpc = addr+4;
5052+ return 2;
5053+ }
5054+ } while (0);
5055+
5056+ do { /* PaX: patched PLT emulation #5 */
5057+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058+
5059+ err = get_user(sethi, (unsigned int *)regs->tpc);
5060+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067+
5068+ if (err)
5069+ break;
5070+
5071+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5075+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076+ sllx == 0x83287020U &&
5077+ jmpl == 0x81C04005U &&
5078+ nop == 0x01000000U)
5079+ {
5080+ unsigned long addr;
5081+
5082+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083+ regs->u_regs[UREG_G1] <<= 32;
5084+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086+ regs->tpc = addr;
5087+ regs->tnpc = addr+4;
5088+ return 2;
5089+ }
5090+ } while (0);
5091+
5092+ do { /* PaX: patched PLT emulation #6 */
5093+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094+
5095+ err = get_user(sethi, (unsigned int *)regs->tpc);
5096+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102+
5103+ if (err)
5104+ break;
5105+
5106+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109+ sllx == 0x83287020U &&
5110+ (or & 0xFFFFE000U) == 0x8A116000U &&
5111+ jmpl == 0x81C04005U &&
5112+ nop == 0x01000000U)
5113+ {
5114+ unsigned long addr;
5115+
5116+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117+ regs->u_regs[UREG_G1] <<= 32;
5118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120+ regs->tpc = addr;
5121+ regs->tnpc = addr+4;
5122+ return 2;
5123+ }
5124+ } while (0);
5125+
5126+ do { /* PaX: unpatched PLT emulation step 1 */
5127+ unsigned int sethi, ba, nop;
5128+
5129+ err = get_user(sethi, (unsigned int *)regs->tpc);
5130+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132+
5133+ if (err)
5134+ break;
5135+
5136+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138+ nop == 0x01000000U)
5139+ {
5140+ unsigned long addr;
5141+ unsigned int save, call;
5142+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143+
5144+ if ((ba & 0xFFC00000U) == 0x30800000U)
5145+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146+ else
5147+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148+
5149+ if (test_thread_flag(TIF_32BIT))
5150+ addr &= 0xFFFFFFFFUL;
5151+
5152+ err = get_user(save, (unsigned int *)addr);
5153+ err |= get_user(call, (unsigned int *)(addr+4));
5154+ err |= get_user(nop, (unsigned int *)(addr+8));
5155+ if (err)
5156+ break;
5157+
5158+#ifdef CONFIG_PAX_DLRESOLVE
5159+ if (save == 0x9DE3BFA8U &&
5160+ (call & 0xC0000000U) == 0x40000000U &&
5161+ nop == 0x01000000U)
5162+ {
5163+ struct vm_area_struct *vma;
5164+ unsigned long call_dl_resolve;
5165+
5166+ down_read(&current->mm->mmap_sem);
5167+ call_dl_resolve = current->mm->call_dl_resolve;
5168+ up_read(&current->mm->mmap_sem);
5169+ if (likely(call_dl_resolve))
5170+ goto emulate;
5171+
5172+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173+
5174+ down_write(&current->mm->mmap_sem);
5175+ if (current->mm->call_dl_resolve) {
5176+ call_dl_resolve = current->mm->call_dl_resolve;
5177+ up_write(&current->mm->mmap_sem);
5178+ if (vma)
5179+ kmem_cache_free(vm_area_cachep, vma);
5180+ goto emulate;
5181+ }
5182+
5183+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185+ up_write(&current->mm->mmap_sem);
5186+ if (vma)
5187+ kmem_cache_free(vm_area_cachep, vma);
5188+ return 1;
5189+ }
5190+
5191+ if (pax_insert_vma(vma, call_dl_resolve)) {
5192+ up_write(&current->mm->mmap_sem);
5193+ kmem_cache_free(vm_area_cachep, vma);
5194+ return 1;
5195+ }
5196+
5197+ current->mm->call_dl_resolve = call_dl_resolve;
5198+ up_write(&current->mm->mmap_sem);
5199+
5200+emulate:
5201+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202+ regs->tpc = call_dl_resolve;
5203+ regs->tnpc = addr+4;
5204+ return 3;
5205+ }
5206+#endif
5207+
5208+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209+ if ((save & 0xFFC00000U) == 0x05000000U &&
5210+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5211+ nop == 0x01000000U)
5212+ {
5213+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214+ regs->u_regs[UREG_G2] = addr + 4;
5215+ addr = (save & 0x003FFFFFU) << 10;
5216+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217+
5218+ if (test_thread_flag(TIF_32BIT))
5219+ addr &= 0xFFFFFFFFUL;
5220+
5221+ regs->tpc = addr;
5222+ regs->tnpc = addr+4;
5223+ return 3;
5224+ }
5225+
5226+ /* PaX: 64-bit PLT stub */
5227+ err = get_user(sethi1, (unsigned int *)addr);
5228+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5229+ err |= get_user(or1, (unsigned int *)(addr+8));
5230+ err |= get_user(or2, (unsigned int *)(addr+12));
5231+ err |= get_user(sllx, (unsigned int *)(addr+16));
5232+ err |= get_user(add, (unsigned int *)(addr+20));
5233+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5234+ err |= get_user(nop, (unsigned int *)(addr+28));
5235+ if (err)
5236+ break;
5237+
5238+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5241+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242+ sllx == 0x89293020U &&
5243+ add == 0x8A010005U &&
5244+ jmpl == 0x89C14000U &&
5245+ nop == 0x01000000U)
5246+ {
5247+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249+ regs->u_regs[UREG_G4] <<= 32;
5250+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252+ regs->u_regs[UREG_G4] = addr + 24;
5253+ addr = regs->u_regs[UREG_G5];
5254+ regs->tpc = addr;
5255+ regs->tnpc = addr+4;
5256+ return 3;
5257+ }
5258+ }
5259+ } while (0);
5260+
5261+#ifdef CONFIG_PAX_DLRESOLVE
5262+ do { /* PaX: unpatched PLT emulation step 2 */
5263+ unsigned int save, call, nop;
5264+
5265+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5266+ err |= get_user(call, (unsigned int *)regs->tpc);
5267+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268+ if (err)
5269+ break;
5270+
5271+ if (save == 0x9DE3BFA8U &&
5272+ (call & 0xC0000000U) == 0x40000000U &&
5273+ nop == 0x01000000U)
5274+ {
5275+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276+
5277+ if (test_thread_flag(TIF_32BIT))
5278+ dl_resolve &= 0xFFFFFFFFUL;
5279+
5280+ regs->u_regs[UREG_RETPC] = regs->tpc;
5281+ regs->tpc = dl_resolve;
5282+ regs->tnpc = dl_resolve+4;
5283+ return 3;
5284+ }
5285+ } while (0);
5286+#endif
5287+
5288+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289+ unsigned int sethi, ba, nop;
5290+
5291+ err = get_user(sethi, (unsigned int *)regs->tpc);
5292+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294+
5295+ if (err)
5296+ break;
5297+
5298+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299+ (ba & 0xFFF00000U) == 0x30600000U &&
5300+ nop == 0x01000000U)
5301+ {
5302+ unsigned long addr;
5303+
5304+ addr = (sethi & 0x003FFFFFU) << 10;
5305+ regs->u_regs[UREG_G1] = addr;
5306+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307+
5308+ if (test_thread_flag(TIF_32BIT))
5309+ addr &= 0xFFFFFFFFUL;
5310+
5311+ regs->tpc = addr;
5312+ regs->tnpc = addr+4;
5313+ return 2;
5314+ }
5315+ } while (0);
5316+
5317+#endif
5318+
5319+ return 1;
5320+}
5321+
5322+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323+{
5324+ unsigned long i;
5325+
5326+ printk(KERN_ERR "PAX: bytes at PC: ");
5327+ for (i = 0; i < 8; i++) {
5328+ unsigned int c;
5329+ if (get_user(c, (unsigned int *)pc+i))
5330+ printk(KERN_CONT "???????? ");
5331+ else
5332+ printk(KERN_CONT "%08x ", c);
5333+ }
5334+ printk("\n");
5335+}
5336+#endif
5337+
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345+#ifdef CONFIG_PAX_PAGEEXEC
5346+ /* PaX: detect ITLB misses on non-exec pages */
5347+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349+ {
5350+ if (address != regs->tpc)
5351+ goto good_area;
5352+
5353+ up_read(&mm->mmap_sem);
5354+ switch (pax_handle_fetch_fault(regs)) {
5355+
5356+#ifdef CONFIG_PAX_EMUPLT
5357+ case 2:
5358+ case 3:
5359+ return;
5360+#endif
5361+
5362+ }
5363+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364+ do_group_exit(SIGKILL);
5365+ }
5366+#endif
5367+
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372index 07e1453..0a7d9e9 100644
5373--- a/arch/sparc/mm/hugetlbpage.c
5374+++ b/arch/sparc/mm/hugetlbpage.c
5375@@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379- if (likely(!vma || addr + len <= vma->vm_start)) {
5380+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388- if (!vma || addr <= vma->vm_start) {
5389+ if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397- addr = (mm->mmap_base-len) & HPAGE_MASK;
5398+ addr = mm->mmap_base - len;
5399
5400 do {
5401+ addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408- if (likely(!vma || addr+len <= vma->vm_start)) {
5409+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417- addr = (vma->vm_start-len) & HPAGE_MASK;
5418- } while (likely(len < vma->vm_start));
5419+ addr = skip_heap_stack_gap(vma, len);
5420+ } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428- if (task_size - len >= addr &&
5429- (!vma || addr + len <= vma->vm_start))
5430+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435index 7b00de6..78239f4 100644
5436--- a/arch/sparc/mm/init_32.c
5437+++ b/arch/sparc/mm/init_32.c
5438@@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444+
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448@@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452- protection_map[1] = PAGE_READONLY;
5453- protection_map[2] = PAGE_COPY;
5454- protection_map[3] = PAGE_COPY;
5455+ protection_map[1] = PAGE_READONLY_NOEXEC;
5456+ protection_map[2] = PAGE_COPY_NOEXEC;
5457+ protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463- protection_map[9] = PAGE_READONLY;
5464- protection_map[10] = PAGE_SHARED;
5465- protection_map[11] = PAGE_SHARED;
5466+ protection_map[9] = PAGE_READONLY_NOEXEC;
5467+ protection_map[10] = PAGE_SHARED_NOEXEC;
5468+ protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473index cbef74e..c38fead 100644
5474--- a/arch/sparc/mm/srmmu.c
5475+++ b/arch/sparc/mm/srmmu.c
5476@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480+
5481+#ifdef CONFIG_PAX_PAGEEXEC
5482+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485+#endif
5486+
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490diff --git a/arch/um/Makefile b/arch/um/Makefile
5491index 7730af6..cce5b19 100644
5492--- a/arch/um/Makefile
5493+++ b/arch/um/Makefile
5494@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498+ifdef CONSTIFY_PLUGIN
5499+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500+endif
5501+
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506index 6c03acd..a5e0215 100644
5507--- a/arch/um/include/asm/kmap_types.h
5508+++ b/arch/um/include/asm/kmap_types.h
5509@@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513+ KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518index 7cfc3ce..cbd1a58 100644
5519--- a/arch/um/include/asm/page.h
5520+++ b/arch/um/include/asm/page.h
5521@@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525+#define ktla_ktva(addr) (addr)
5526+#define ktva_ktla(addr) (addr)
5527+
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532index c533835..84db18e 100644
5533--- a/arch/um/kernel/process.c
5534+++ b/arch/um/kernel/process.c
5535@@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539-/*
5540- * Only x86 and x86_64 have an arch_align_stack().
5541- * All other arches have "#define arch_align_stack(x) (x)"
5542- * in their asm/system.h
5543- * As this is included in UML from asm-um/system-generic.h,
5544- * we can use it to behave as the subarch does.
5545- */
5546-#ifndef arch_align_stack
5547-unsigned long arch_align_stack(unsigned long sp)
5548-{
5549- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550- sp -= get_random_int() % 8192;
5551- return sp & ~0xf;
5552-}
5553-#endif
5554-
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559index efb4294..61bc18c 100644
5560--- a/arch/x86/Kconfig
5561+++ b/arch/x86/Kconfig
5562@@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566- depends on X86_32 && !CC_STACKPROTECTOR
5567+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571@@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575- depends on !X86_NUMAQ
5576+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580@@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584- depends on !X86_NUMAQ
5585+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593- default 0x78000000 if VMSPLIT_2G_OPT
5594+ default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598@@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602+ depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606@@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610+ range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618+ range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626- def_bool y
5627+ def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635index e3ca7e0..b30b28a 100644
5636--- a/arch/x86/Kconfig.cpu
5637+++ b/arch/x86/Kconfig.cpu
5638@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642- depends on M586MMX || M586TSC || M586 || M486 || M386
5643+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647@@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666index bf56e17..05f9891 100644
5667--- a/arch/x86/Kconfig.debug
5668+++ b/arch/x86/Kconfig.debug
5669@@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673- depends on DEBUG_KERNEL
5674+ depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682- depends on MODULES
5683+ depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688index b02e509..2631e48 100644
5689--- a/arch/x86/Makefile
5690+++ b/arch/x86/Makefile
5691@@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695+ biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699@@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703+
5704+define OLD_LD
5705+
5706+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707+*** Please upgrade your binutils to 2.18 or newer
5708+endef
5709+
5710+archprepare:
5711+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713index 95365a8..52f857b 100644
5714--- a/arch/x86/boot/Makefile
5715+++ b/arch/x86/boot/Makefile
5716@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720+ifdef CONSTIFY_PLUGIN
5721+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722+endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727index 878e4b9..20537ab 100644
5728--- a/arch/x86/boot/bitops.h
5729+++ b/arch/x86/boot/bitops.h
5730@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749index c7093bd..d4247ffe0 100644
5750--- a/arch/x86/boot/boot.h
5751+++ b/arch/x86/boot/boot.h
5752@@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756- asm("movw %%ds,%0" : "=rm" (seg));
5757+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765- asm("repe; cmpsb; setnz %0"
5766+ asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771index 09664ef..edc5d03 100644
5772--- a/arch/x86/boot/compressed/Makefile
5773+++ b/arch/x86/boot/compressed/Makefile
5774@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778+ifdef CONSTIFY_PLUGIN
5779+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780+endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785index 67a655a..b924059 100644
5786--- a/arch/x86/boot/compressed/head_32.S
5787+++ b/arch/x86/boot/compressed/head_32.S
5788@@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792- movl $LOAD_PHYSICAL_ADDR, %ebx
5793+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797@@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801- subl $LOAD_PHYSICAL_ADDR, %ebx
5802+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806@@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810- testl %ecx, %ecx
5811- jz 2f
5812+ jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817index 35af09d..99c9676 100644
5818--- a/arch/x86/boot/compressed/head_64.S
5819+++ b/arch/x86/boot/compressed/head_64.S
5820@@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824- movl $LOAD_PHYSICAL_ADDR, %ebx
5825+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829@@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833- movq $LOAD_PHYSICAL_ADDR, %rbp
5834+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839index 3a19d04..7c1d55a 100644
5840--- a/arch/x86/boot/compressed/misc.c
5841+++ b/arch/x86/boot/compressed/misc.c
5842@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861index 89bbf4e..869908e 100644
5862--- a/arch/x86/boot/compressed/relocs.c
5863+++ b/arch/x86/boot/compressed/relocs.c
5864@@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868+#include "../../../../include/generated/autoconf.h"
5869+
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872+static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880+static void read_phdrs(FILE *fp)
5881+{
5882+ unsigned int i;
5883+
5884+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885+ if (!phdr) {
5886+ die("Unable to allocate %d program headers\n",
5887+ ehdr.e_phnum);
5888+ }
5889+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890+ die("Seek to %d failed: %s\n",
5891+ ehdr.e_phoff, strerror(errno));
5892+ }
5893+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894+ die("Cannot read ELF program headers: %s\n",
5895+ strerror(errno));
5896+ }
5897+ for(i = 0; i < ehdr.e_phnum; i++) {
5898+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906+ }
5907+
5908+}
5909+
5910 static void read_shdrs(FILE *fp)
5911 {
5912- int i;
5913+ unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921- int i;
5922+ unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930- int i,j;
5931+ unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939- int i,j;
5940+ unsigned int i,j;
5941+ uint32_t base;
5942+
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950+ base = 0;
5951+ for (j = 0; j < ehdr.e_phnum; j++) {
5952+ if (phdr[j].p_type != PT_LOAD )
5953+ continue;
5954+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955+ continue;
5956+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957+ break;
5958+ }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961- rel->r_offset = elf32_to_cpu(rel->r_offset);
5962+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970- int i;
5971+ unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978- int j;
5979+ unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987- int i, printed = 0;
5988+ unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995- int j;
5996+ unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004- int i;
6005+ unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011- int j;
6012+ unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022+ continue;
6023+
6024+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027+ continue;
6028+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029+ continue;
6030+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031+ continue;
6032+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033+ continue;
6034+#endif
6035+
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043- int i;
6044+ unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052+ read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057index 4d3ff03..e4972ff 100644
6058--- a/arch/x86/boot/cpucheck.c
6059+++ b/arch/x86/boot/cpucheck.c
6060@@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064- asm("movl %%cr0,%0" : "=r" (cr0));
6065+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073- asm("pushfl ; "
6074+ asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078@@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082- asm("cpuid"
6083+ asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087@@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091- asm("cpuid"
6092+ asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096@@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100- asm("cpuid"
6101+ asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105@@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109- asm("cpuid"
6110+ asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144- asm("cpuid"
6145+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147+ asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156index bdb4d45..0476680 100644
6157--- a/arch/x86/boot/header.S
6158+++ b/arch/x86/boot/header.S
6159@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169index db75d07..8e6d0af 100644
6170--- a/arch/x86/boot/memory.c
6171+++ b/arch/x86/boot/memory.c
6172@@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176- int count = 0;
6177+ unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182index 11e8c6e..fdbb1ed 100644
6183--- a/arch/x86/boot/video-vesa.c
6184+++ b/arch/x86/boot/video-vesa.c
6185@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189+ boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194index 43eda28..5ab5fdb 100644
6195--- a/arch/x86/boot/video.c
6196+++ b/arch/x86/boot/video.c
6197@@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201- int i, len = 0;
6202+ unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207index 5b577d5..3c1fed4 100644
6208--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210@@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214+#include <asm/alternative-asm.h>
6215+
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223+#define ret pax_force_retaddr 0, 1; ret
6224+
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229index be6d9e3..21fbbca 100644
6230--- a/arch/x86/crypto/aesni-intel_asm.S
6231+++ b/arch/x86/crypto/aesni-intel_asm.S
6232@@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236+#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244+ pax_force_retaddr 0, 1
6245 ret
6246+ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254+ pax_force_retaddr 0, 1
6255 ret
6256+ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264+ pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272+ pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280+ pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288+ pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296+ pax_force_retaddr 0, 1
6297 ret
6298+ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306+ pax_force_retaddr 0, 1
6307 ret
6308+ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312@@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316+ pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320@@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324+ pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332+ pax_force_retaddr 0, 1
6333 ret
6334+ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338@@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342+ pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346@@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350+ pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358+ pax_force_retaddr 0, 1
6359 ret
6360+ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368+ pax_force_retaddr 0, 1
6369 ret
6370+ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378+ pax_force_retaddr 0, 1
6379 ret
6380+ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388+ pax_force_retaddr 0, 1
6389 ret
6390+ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398+ pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402@@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406+ pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414+ pax_force_retaddr 0, 1
6415 ret
6416+ENDPROC(aesni_ctr_enc)
6417 #endif
6418diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419index 391d245..67f35c2 100644
6420--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422@@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426+#include <asm/alternative-asm.h>
6427+
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431@@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435+ pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439+ pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443@@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447+ pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455+ pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459@@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463+ pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471+ pax_force_retaddr 0, 1
6472 ret;
6473
6474diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475index 6214a9b..1f4fc9a 100644
6476--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478@@ -1,3 +1,5 @@
6479+#include <asm/alternative-asm.h>
6480+
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488+ pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496+ pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504+ pax_force_retaddr
6505 ret
6506diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507index b2c2f57..8470cab 100644
6508--- a/arch/x86/crypto/sha1_ssse3_asm.S
6509+++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510@@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514+#include <asm/alternative-asm.h>
6515+
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519@@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523+ pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528index 5b012a2..36d5364 100644
6529--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531@@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535+#include <asm/alternative-asm.h>
6536+
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544+ pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548@@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552+ pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560+ pax_force_retaddr 0, 1
6561 ret;
6562
6563diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564index 7bcf3fc..f53832f 100644
6565--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567@@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571+#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575@@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579+ pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583@@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587+ pax_force_retaddr 0, 1
6588 ret
6589diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590index fd84387..0b4af7d 100644
6591--- a/arch/x86/ia32/ia32_aout.c
6592+++ b/arch/x86/ia32/ia32_aout.c
6593@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597+ memset(&dump, 0, sizeof(dump));
6598+
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603index 6557769..ef6ae89 100644
6604--- a/arch/x86/ia32/ia32_signal.c
6605+++ b/arch/x86/ia32/ia32_signal.c
6606@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619- void **fpstate)
6620+ void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628- *fpstate = (struct _fpstate_ia32 *) sp;
6629+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637- sp = ((sp + 4) & -16ul) - 4;
6638+ sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655- 0,
6656+ 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664+ else if (current->mm->context.vdso)
6665+ /* Return stub is in 32bit vsyscall page */
6666+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669- rt_sigreturn);
6670+ restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683index a6253ec..4ad2120 100644
6684--- a/arch/x86/ia32/ia32entry.S
6685+++ b/arch/x86/ia32/ia32entry.S
6686@@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690+#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692+#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700+ .macro pax_enter_kernel_user
6701+ pax_set_fptr_mask
6702+#ifdef CONFIG_PAX_MEMORY_UDEREF
6703+ call pax_enter_kernel_user
6704+#endif
6705+ .endm
6706+
6707+ .macro pax_exit_kernel_user
6708+#ifdef CONFIG_PAX_MEMORY_UDEREF
6709+ call pax_exit_kernel_user
6710+#endif
6711+#ifdef CONFIG_PAX_RANDKSTACK
6712+ pushq %rax
6713+ pushq %r11
6714+ call pax_randomize_kstack
6715+ popq %r11
6716+ popq %rax
6717+#endif
6718+ .endm
6719+
6720+.macro pax_erase_kstack
6721+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722+ call pax_erase_kstack
6723+#endif
6724+.endm
6725+
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733- addq $(KERNEL_STACK_OFFSET),%rsp
6734- /*
6735- * No need to follow this irqs on/off section: the syscall
6736- * disabled irqs, here we enable it straight after entry:
6737- */
6738- ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747- CFI_REGISTER rip,r10
6748+ orl $X86_EFLAGS_IF,(%rsp)
6749+ GET_THREAD_INFO(%r11)
6750+ movl TI_sysenter_return(%r11), %r11d
6751+ CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755- pushq_cfi %r10
6756+ pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761+ pax_enter_kernel_user
6762+ /*
6763+ * No need to follow this irqs on/off section: the syscall
6764+ * disabled irqs, here we enable it straight after entry:
6765+ */
6766+ ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769+
6770+#ifdef CONFIG_PAX_MEMORY_UDEREF
6771+ mov $PAX_USER_SHADOW_BASE,%r11
6772+ add %r11,%rbp
6773+#endif
6774+
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779- GET_THREAD_INFO(%r10)
6780- orl $TS_COMPAT,TI_status(%r10)
6781- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782+ GET_THREAD_INFO(%r11)
6783+ orl $TS_COMPAT,TI_status(%r11)
6784+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788@@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792- GET_THREAD_INFO(%r10)
6793+ GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800- andl $~TS_COMPAT,TI_status(%r10)
6801+ pax_exit_kernel_user
6802+ pax_erase_kstack
6803+ andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807@@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811+
6812+ pax_erase_kstack
6813+
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817@@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826@@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830- GET_THREAD_INFO(%r10)
6831+ GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836- testl %edi,TI_flags(%r10)
6837+ testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841@@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850@@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854+
6855+ pax_erase_kstack
6856+
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865+ CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872+ SAVE_ARGS 8*6,0,0
6873+ pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887+
6888+#ifdef CONFIG_PAX_MEMORY_UDEREF
6889+ mov $PAX_USER_SHADOW_BASE,%r11
6890+ add %r11,%r8
6891+#endif
6892+
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897- GET_THREAD_INFO(%r10)
6898- orl $TS_COMPAT,TI_status(%r10)
6899- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900+ GET_THREAD_INFO(%r11)
6901+ orl $TS_COMPAT,TI_status(%r11)
6902+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906@@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910- GET_THREAD_INFO(%r10)
6911+ GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918- andl $~TS_COMPAT,TI_status(%r10)
6919+ pax_exit_kernel_user
6920+ pax_erase_kstack
6921+ andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925@@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934@@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938+
6939+ pax_erase_kstack
6940+
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948- /*
6949- * No need to follow this irqs on/off section: the syscall
6950- * disabled irqs and here we enable it straight after entry:
6951- */
6952- ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959- GET_THREAD_INFO(%r10)
6960- orl $TS_COMPAT,TI_status(%r10)
6961- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962+ pax_enter_kernel_user
6963+ /*
6964+ * No need to follow this irqs on/off section: the syscall
6965+ * disabled irqs and here we enable it straight after entry:
6966+ */
6967+ ENABLE_INTERRUPTS(CLBR_NONE)
6968+ GET_THREAD_INFO(%r11)
6969+ orl $TS_COMPAT,TI_status(%r11)
6970+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974@@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978+
6979+ pax_erase_kstack
6980+
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984@@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988+ pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993index f6f5c53..b358b28 100644
6994--- a/arch/x86/ia32/sys_ia32.c
6995+++ b/arch/x86/ia32/sys_ia32.c
6996@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000- typeof(ubuf->st_uid) uid = 0;
7001- typeof(ubuf->st_gid) gid = 0;
7002+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011- set ? (sigset_t __user *)&s : NULL,
7012- oset ? (sigset_t __user *)&s : NULL,
7013+ set ? (sigset_t __force_user *)&s : NULL,
7014+ oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064index 091508b..e245ff2 100644
7065--- a/arch/x86/include/asm/alternative-asm.h
7066+++ b/arch/x86/include/asm/alternative-asm.h
7067@@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071-1: lock
7072+672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075- .long 1b - .
7076+ .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080@@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085+ .macro pax_force_retaddr_bts rip=0
7086+ btsq $63,\rip(%rsp)
7087+ .endm
7088+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089+ .macro pax_force_retaddr rip=0, reload=0
7090+ btsq $63,\rip(%rsp)
7091+ .endm
7092+ .macro pax_force_fptr ptr
7093+ btsq $63,\ptr
7094+ .endm
7095+ .macro pax_set_fptr_mask
7096+ .endm
7097+#endif
7098+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099+ .macro pax_force_retaddr rip=0, reload=0
7100+ .if \reload
7101+ pax_set_fptr_mask
7102+ .endif
7103+ orq %r10,\rip(%rsp)
7104+ .endm
7105+ .macro pax_force_fptr ptr
7106+ orq %r10,\ptr
7107+ .endm
7108+ .macro pax_set_fptr_mask
7109+ movabs $0x8000000000000000,%r10
7110+ .endm
7111+#endif
7112+#else
7113+ .macro pax_force_retaddr rip=0, reload=0
7114+ .endm
7115+ .macro pax_force_fptr ptr
7116+ .endm
7117+ .macro pax_force_retaddr_bts rip=0
7118+ .endm
7119+ .macro pax_set_fptr_mask
7120+ .endm
7121+#endif
7122+
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127index 37ad100..7d47faa 100644
7128--- a/arch/x86/include/asm/alternative.h
7129+++ b/arch/x86/include/asm/alternative.h
7130@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134- ".section .altinstr_replacement, \"ax\"\n" \
7135+ ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140index 1a6c09a..fec2432 100644
7141--- a/arch/x86/include/asm/apic.h
7142+++ b/arch/x86/include/asm/apic.h
7143@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147-extern unsigned int apic_verbosity;
7148+extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153index 20370c6..a2eb9b0 100644
7154--- a/arch/x86/include/asm/apm.h
7155+++ b/arch/x86/include/asm/apm.h
7156@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160- "lcall *%%cs:apm_bios_entry\n\t"
7161+ "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169- "lcall *%%cs:apm_bios_entry\n\t"
7170+ "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175index 58cb6d4..ca9010d 100644
7176--- a/arch/x86/include/asm/atomic.h
7177+++ b/arch/x86/include/asm/atomic.h
7178@@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182- return (*(volatile int *)&(v)->counter);
7183+ return (*(volatile const int *)&(v)->counter);
7184+}
7185+
7186+/**
7187+ * atomic_read_unchecked - read atomic variable
7188+ * @v: pointer of type atomic_unchecked_t
7189+ *
7190+ * Atomically reads the value of @v.
7191+ */
7192+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193+{
7194+ return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202+ * atomic_set_unchecked - set atomic variable
7203+ * @v: pointer of type atomic_unchecked_t
7204+ * @i: required value
7205+ *
7206+ * Atomically sets the value of @v to @i.
7207+ */
7208+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209+{
7210+ v->counter = i;
7211+}
7212+
7213+/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221- asm volatile(LOCK_PREFIX "addl %1,%0"
7222+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223+
7224+#ifdef CONFIG_PAX_REFCOUNT
7225+ "jno 0f\n"
7226+ LOCK_PREFIX "subl %1,%0\n"
7227+ "int $4\n0:\n"
7228+ _ASM_EXTABLE(0b, 0b)
7229+#endif
7230+
7231+ : "+m" (v->counter)
7232+ : "ir" (i));
7233+}
7234+
7235+/**
7236+ * atomic_add_unchecked - add integer to atomic variable
7237+ * @i: integer value to add
7238+ * @v: pointer of type atomic_unchecked_t
7239+ *
7240+ * Atomically adds @i to @v.
7241+ */
7242+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243+{
7244+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252- asm volatile(LOCK_PREFIX "subl %1,%0"
7253+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254+
7255+#ifdef CONFIG_PAX_REFCOUNT
7256+ "jno 0f\n"
7257+ LOCK_PREFIX "addl %1,%0\n"
7258+ "int $4\n0:\n"
7259+ _ASM_EXTABLE(0b, 0b)
7260+#endif
7261+
7262+ : "+m" (v->counter)
7263+ : "ir" (i));
7264+}
7265+
7266+/**
7267+ * atomic_sub_unchecked - subtract integer from atomic variable
7268+ * @i: integer value to subtract
7269+ * @v: pointer of type atomic_unchecked_t
7270+ *
7271+ * Atomically subtracts @i from @v.
7272+ */
7273+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274+{
7275+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285+
7286+#ifdef CONFIG_PAX_REFCOUNT
7287+ "jno 0f\n"
7288+ LOCK_PREFIX "addl %2,%0\n"
7289+ "int $4\n0:\n"
7290+ _ASM_EXTABLE(0b, 0b)
7291+#endif
7292+
7293+ "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301- asm volatile(LOCK_PREFIX "incl %0"
7302+ asm volatile(LOCK_PREFIX "incl %0\n"
7303+
7304+#ifdef CONFIG_PAX_REFCOUNT
7305+ "jno 0f\n"
7306+ LOCK_PREFIX "decl %0\n"
7307+ "int $4\n0:\n"
7308+ _ASM_EXTABLE(0b, 0b)
7309+#endif
7310+
7311+ : "+m" (v->counter));
7312+}
7313+
7314+/**
7315+ * atomic_inc_unchecked - increment atomic variable
7316+ * @v: pointer of type atomic_unchecked_t
7317+ *
7318+ * Atomically increments @v by 1.
7319+ */
7320+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321+{
7322+ asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330- asm volatile(LOCK_PREFIX "decl %0"
7331+ asm volatile(LOCK_PREFIX "decl %0\n"
7332+
7333+#ifdef CONFIG_PAX_REFCOUNT
7334+ "jno 0f\n"
7335+ LOCK_PREFIX "incl %0\n"
7336+ "int $4\n0:\n"
7337+ _ASM_EXTABLE(0b, 0b)
7338+#endif
7339+
7340+ : "+m" (v->counter));
7341+}
7342+
7343+/**
7344+ * atomic_dec_unchecked - decrement atomic variable
7345+ * @v: pointer of type atomic_unchecked_t
7346+ *
7347+ * Atomically decrements @v by 1.
7348+ */
7349+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350+{
7351+ asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360+ asm volatile(LOCK_PREFIX "decl %0\n"
7361+
7362+#ifdef CONFIG_PAX_REFCOUNT
7363+ "jno 0f\n"
7364+ LOCK_PREFIX "incl %0\n"
7365+ "int $4\n0:\n"
7366+ _ASM_EXTABLE(0b, 0b)
7367+#endif
7368+
7369+ "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378+ asm volatile(LOCK_PREFIX "incl %0\n"
7379+
7380+#ifdef CONFIG_PAX_REFCOUNT
7381+ "jno 0f\n"
7382+ LOCK_PREFIX "decl %0\n"
7383+ "int $4\n0:\n"
7384+ _ASM_EXTABLE(0b, 0b)
7385+#endif
7386+
7387+ "sete %1\n"
7388+ : "+m" (v->counter), "=qm" (c)
7389+ : : "memory");
7390+ return c != 0;
7391+}
7392+
7393+/**
7394+ * atomic_inc_and_test_unchecked - increment and test
7395+ * @v: pointer of type atomic_unchecked_t
7396+ *
7397+ * Atomically increments @v by 1
7398+ * and returns true if the result is zero, or false for all
7399+ * other cases.
7400+ */
7401+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402+{
7403+ unsigned char c;
7404+
7405+ asm volatile(LOCK_PREFIX "incl %0\n"
7406+ "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416+
7417+#ifdef CONFIG_PAX_REFCOUNT
7418+ "jno 0f\n"
7419+ LOCK_PREFIX "subl %2,%0\n"
7420+ "int $4\n0:\n"
7421+ _ASM_EXTABLE(0b, 0b)
7422+#endif
7423+
7424+ "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432- return i + xadd(&v->counter, i);
7433+ return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441+ * atomic_add_return_unchecked - add integer and return
7442+ * @i: integer value to add
7443+ * @v: pointer of type atomic_unchecked_t
7444+ *
7445+ * Atomically adds @i to @v and returns @i + @v
7446+ */
7447+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448+{
7449+#ifdef CONFIG_M386
7450+ int __i;
7451+ unsigned long flags;
7452+ if (unlikely(boot_cpu_data.x86 <= 3))
7453+ goto no_xadd;
7454+#endif
7455+ /* Modern 486+ processor */
7456+ return i + xadd(&v->counter, i);
7457+
7458+#ifdef CONFIG_M386
7459+no_xadd: /* Legacy 386 processor */
7460+ raw_local_irq_save(flags);
7461+ __i = atomic_read_unchecked(v);
7462+ atomic_set_unchecked(v, i + __i);
7463+ raw_local_irq_restore(flags);
7464+ return i + __i;
7465+#endif
7466+}
7467+
7468+/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477+{
7478+ return atomic_add_return_unchecked(1, v);
7479+}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488+{
7489+ return cmpxchg(&v->counter, old, new);
7490+}
7491+
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498+{
7499+ return xchg(&v->counter, new);
7500+}
7501+
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509- int c, old;
7510+ int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513- if (unlikely(c == (u)))
7514+ if (unlikely(c == u))
7515 break;
7516- old = atomic_cmpxchg((v), c, c + (a));
7517+
7518+ asm volatile("addl %2,%0\n"
7519+
7520+#ifdef CONFIG_PAX_REFCOUNT
7521+ "jno 0f\n"
7522+ "subl %2,%0\n"
7523+ "int $4\n0:\n"
7524+ _ASM_EXTABLE(0b, 0b)
7525+#endif
7526+
7527+ : "=r" (new)
7528+ : "0" (c), "ir" (a));
7529+
7530+ old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538+/**
7539+ * atomic_inc_not_zero_hint - increment if not null
7540+ * @v: pointer of type atomic_t
7541+ * @hint: probable value of the atomic before the increment
7542+ *
7543+ * This version of atomic_inc_not_zero() gives a hint of probable
7544+ * value of the atomic. This helps processor to not read the memory
7545+ * before doing the atomic read/modify/write cycle, lowering
7546+ * number of bus transactions on some arches.
7547+ *
7548+ * Returns: 0 if increment was not done, 1 otherwise.
7549+ */
7550+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552+{
7553+ int val, c = hint, new;
7554+
7555+ /* sanity test, should be removed by compiler if hint is a constant */
7556+ if (!hint)
7557+ return __atomic_add_unless(v, 1, 0);
7558+
7559+ do {
7560+ asm volatile("incl %0\n"
7561+
7562+#ifdef CONFIG_PAX_REFCOUNT
7563+ "jno 0f\n"
7564+ "decl %0\n"
7565+ "int $4\n0:\n"
7566+ _ASM_EXTABLE(0b, 0b)
7567+#endif
7568+
7569+ : "=r" (new)
7570+ : "0" (c));
7571+
7572+ val = atomic_cmpxchg(v, c, new);
7573+ if (val == c)
7574+ return 1;
7575+ c = val;
7576+ } while (c);
7577+
7578+ return 0;
7579+}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584index 24098aa..1e37723 100644
7585--- a/arch/x86/include/asm/atomic64_32.h
7586+++ b/arch/x86/include/asm/atomic64_32.h
7587@@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591+#ifdef CONFIG_PAX_REFCOUNT
7592+typedef struct {
7593+ u64 __aligned(8) counter;
7594+} atomic64_unchecked_t;
7595+#else
7596+typedef atomic64_t atomic64_unchecked_t;
7597+#endif
7598+
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607+ * @p: pointer to type atomic64_unchecked_t
7608+ * @o: expected value
7609+ * @n: new value
7610+ *
7611+ * Atomically sets @v to @n if it was equal to @o and returns
7612+ * the old value.
7613+ */
7614+
7615+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616+{
7617+ return cmpxchg64(&v->counter, o, n);
7618+}
7619+
7620+/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628+ * atomic64_set_unchecked - set atomic64 variable
7629+ * @v: pointer to type atomic64_unchecked_t
7630+ * @n: value to assign
7631+ *
7632+ * Atomically sets the value of @v to @n.
7633+ */
7634+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635+{
7636+ unsigned high = (unsigned)(i >> 32);
7637+ unsigned low = (unsigned)i;
7638+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7639+ : "+b" (low), "+c" (high)
7640+ : "S" (v)
7641+ : "eax", "edx", "memory"
7642+ );
7643+}
7644+
7645+/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653+ * atomic64_read_unchecked - read atomic64 variable
7654+ * @v: pointer to type atomic64_unchecked_t
7655+ *
7656+ * Atomically reads the value of @v and returns it.
7657+ */
7658+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659+{
7660+ long long r;
7661+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662+ : "=A" (r), "+c" (v)
7663+ : : "memory"
7664+ );
7665+ return r;
7666+ }
7667+
7668+/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676+/**
7677+ * atomic64_add_return_unchecked - add and return
7678+ * @i: integer value to add
7679+ * @v: pointer to type atomic64_unchecked_t
7680+ *
7681+ * Atomically adds @i to @v and returns @i + *@v
7682+ */
7683+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684+{
7685+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686+ : "+A" (i), "+c" (v)
7687+ : : "memory"
7688+ );
7689+ return i;
7690+}
7691+
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700+{
7701+ long long a;
7702+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703+ : "=A" (a)
7704+ : "S" (v)
7705+ : "memory", "ecx"
7706+ );
7707+ return a;
7708+}
7709+
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717+ * atomic64_add_unchecked - add integer to atomic64 variable
7718+ * @i: integer value to add
7719+ * @v: pointer to type atomic64_unchecked_t
7720+ *
7721+ * Atomically adds @i to @v.
7722+ */
7723+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724+{
7725+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726+ : "+A" (i), "+c" (v)
7727+ : : "memory"
7728+ );
7729+ return i;
7730+}
7731+
7732+/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737index 0e1cbfc..5623683 100644
7738--- a/arch/x86/include/asm/atomic64_64.h
7739+++ b/arch/x86/include/asm/atomic64_64.h
7740@@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744- return (*(volatile long *)&(v)->counter);
7745+ return (*(volatile const long *)&(v)->counter);
7746+}
7747+
7748+/**
7749+ * atomic64_read_unchecked - read atomic64 variable
7750+ * @v: pointer of type atomic64_unchecked_t
7751+ *
7752+ * Atomically reads the value of @v.
7753+ * Doesn't imply a read memory barrier.
7754+ */
7755+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756+{
7757+ return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765+ * atomic64_set_unchecked - set atomic64 variable
7766+ * @v: pointer to type atomic64_unchecked_t
7767+ * @i: required value
7768+ *
7769+ * Atomically sets the value of @v to @i.
7770+ */
7771+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772+{
7773+ v->counter = i;
7774+}
7775+
7776+/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785+
7786+#ifdef CONFIG_PAX_REFCOUNT
7787+ "jno 0f\n"
7788+ LOCK_PREFIX "subq %1,%0\n"
7789+ "int $4\n0:\n"
7790+ _ASM_EXTABLE(0b, 0b)
7791+#endif
7792+
7793+ : "=m" (v->counter)
7794+ : "er" (i), "m" (v->counter));
7795+}
7796+
7797+/**
7798+ * atomic64_add_unchecked - add integer to atomic64 variable
7799+ * @i: integer value to add
7800+ * @v: pointer to type atomic64_unchecked_t
7801+ *
7802+ * Atomically adds @i to @v.
7803+ */
7804+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805+{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813- asm volatile(LOCK_PREFIX "subq %1,%0"
7814+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815+
7816+#ifdef CONFIG_PAX_REFCOUNT
7817+ "jno 0f\n"
7818+ LOCK_PREFIX "addq %1,%0\n"
7819+ "int $4\n0:\n"
7820+ _ASM_EXTABLE(0b, 0b)
7821+#endif
7822+
7823+ : "=m" (v->counter)
7824+ : "er" (i), "m" (v->counter));
7825+}
7826+
7827+/**
7828+ * atomic64_sub_unchecked - subtract the atomic64 variable
7829+ * @i: integer value to subtract
7830+ * @v: pointer to type atomic64_unchecked_t
7831+ *
7832+ * Atomically subtracts @i from @v.
7833+ */
7834+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835+{
7836+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846+
7847+#ifdef CONFIG_PAX_REFCOUNT
7848+ "jno 0f\n"
7849+ LOCK_PREFIX "addq %2,%0\n"
7850+ "int $4\n0:\n"
7851+ _ASM_EXTABLE(0b, 0b)
7852+#endif
7853+
7854+ "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862+ asm volatile(LOCK_PREFIX "incq %0\n"
7863+
7864+#ifdef CONFIG_PAX_REFCOUNT
7865+ "jno 0f\n"
7866+ LOCK_PREFIX "decq %0\n"
7867+ "int $4\n0:\n"
7868+ _ASM_EXTABLE(0b, 0b)
7869+#endif
7870+
7871+ : "=m" (v->counter)
7872+ : "m" (v->counter));
7873+}
7874+
7875+/**
7876+ * atomic64_inc_unchecked - increment atomic64 variable
7877+ * @v: pointer to type atomic64_unchecked_t
7878+ *
7879+ * Atomically increments @v by 1.
7880+ */
7881+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882+{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890- asm volatile(LOCK_PREFIX "decq %0"
7891+ asm volatile(LOCK_PREFIX "decq %0\n"
7892+
7893+#ifdef CONFIG_PAX_REFCOUNT
7894+ "jno 0f\n"
7895+ LOCK_PREFIX "incq %0\n"
7896+ "int $4\n0:\n"
7897+ _ASM_EXTABLE(0b, 0b)
7898+#endif
7899+
7900+ : "=m" (v->counter)
7901+ : "m" (v->counter));
7902+}
7903+
7904+/**
7905+ * atomic64_dec_unchecked - decrement atomic64 variable
7906+ * @v: pointer to type atomic64_t
7907+ *
7908+ * Atomically decrements @v by 1.
7909+ */
7910+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911+{
7912+ asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921+ asm volatile(LOCK_PREFIX "decq %0\n"
7922+
7923+#ifdef CONFIG_PAX_REFCOUNT
7924+ "jno 0f\n"
7925+ LOCK_PREFIX "incq %0\n"
7926+ "int $4\n0:\n"
7927+ _ASM_EXTABLE(0b, 0b)
7928+#endif
7929+
7930+ "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939+ asm volatile(LOCK_PREFIX "incq %0\n"
7940+
7941+#ifdef CONFIG_PAX_REFCOUNT
7942+ "jno 0f\n"
7943+ LOCK_PREFIX "decq %0\n"
7944+ "int $4\n0:\n"
7945+ _ASM_EXTABLE(0b, 0b)
7946+#endif
7947+
7948+ "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958+
7959+#ifdef CONFIG_PAX_REFCOUNT
7960+ "jno 0f\n"
7961+ LOCK_PREFIX "subq %2,%0\n"
7962+ "int $4\n0:\n"
7963+ _ASM_EXTABLE(0b, 0b)
7964+#endif
7965+
7966+ "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974+ return i + xadd_check_overflow(&v->counter, i);
7975+}
7976+
7977+/**
7978+ * atomic64_add_return_unchecked - add and return
7979+ * @i: integer value to add
7980+ * @v: pointer to type atomic64_unchecked_t
7981+ *
7982+ * Atomically adds @i to @v and returns @i + @v
7983+ */
7984+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985+{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994+{
7995+ return atomic64_add_return_unchecked(1, v);
7996+}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005+{
8006+ return cmpxchg(&v->counter, old, new);
8007+}
8008+
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016- long c, old;
8017+ long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020- if (unlikely(c == (u)))
8021+ if (unlikely(c == u))
8022 break;
8023- old = atomic64_cmpxchg((v), c, c + (a));
8024+
8025+ asm volatile("add %2,%0\n"
8026+
8027+#ifdef CONFIG_PAX_REFCOUNT
8028+ "jno 0f\n"
8029+ "sub %2,%0\n"
8030+ "int $4\n0:\n"
8031+ _ASM_EXTABLE(0b, 0b)
8032+#endif
8033+
8034+ : "=r" (new)
8035+ : "0" (c), "ir" (a));
8036+
8037+ old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042- return c != (u);
8043+ return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048index 1775d6e..b65017f 100644
8049--- a/arch/x86/include/asm/bitops.h
8050+++ b/arch/x86/include/asm/bitops.h
8051@@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061index 5e1a2ee..c9f9533 100644
8062--- a/arch/x86/include/asm/boot.h
8063+++ b/arch/x86/include/asm/boot.h
8064@@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073+#ifndef __ASSEMBLY__
8074+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076+#endif
8077+
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082index 48f99f1..d78ebf9 100644
8083--- a/arch/x86/include/asm/cache.h
8084+++ b/arch/x86/include/asm/cache.h
8085@@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093+#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102index 4e12668..501d239 100644
8103--- a/arch/x86/include/asm/cacheflush.h
8104+++ b/arch/x86/include/asm/cacheflush.h
8105@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109- return -1;
8110+ return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115index 46fc474..b02b0f9 100644
8116--- a/arch/x86/include/asm/checksum_32.h
8117+++ b/arch/x86/include/asm/checksum_32.h
8118@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123+ int len, __wsum sum,
8124+ int *src_err_ptr, int *dst_err_ptr);
8125+
8126+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127+ int len, __wsum sum,
8128+ int *src_err_ptr, int *dst_err_ptr);
8129+
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137- return csum_partial_copy_generic((__force void *)src, dst,
8138+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146- return csum_partial_copy_generic(src, (__force void *)dst,
8147+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152index 5d3acdf..6447a02 100644
8153--- a/arch/x86/include/asm/cmpxchg.h
8154+++ b/arch/x86/include/asm/cmpxchg.h
8155@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159+extern void __xadd_check_overflow_wrong_size(void)
8160+ __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168+#define __xadd_check_overflow(ptr, inc, lock) \
8169+ ({ \
8170+ __typeof__ (*(ptr)) __ret = (inc); \
8171+ switch (sizeof(*(ptr))) { \
8172+ case __X86_CASE_L: \
8173+ asm volatile (lock "xaddl %0, %1\n" \
8174+ "jno 0f\n" \
8175+ "mov %0,%1\n" \
8176+ "int $4\n0:\n" \
8177+ _ASM_EXTABLE(0b, 0b) \
8178+ : "+r" (__ret), "+m" (*(ptr)) \
8179+ : : "memory", "cc"); \
8180+ break; \
8181+ case __X86_CASE_Q: \
8182+ asm volatile (lock "xaddq %q0, %1\n" \
8183+ "jno 0f\n" \
8184+ "mov %0,%1\n" \
8185+ "int $4\n0:\n" \
8186+ _ASM_EXTABLE(0b, 0b) \
8187+ : "+r" (__ret), "+m" (*(ptr)) \
8188+ : : "memory", "cc"); \
8189+ break; \
8190+ default: \
8191+ __xadd_check_overflow_wrong_size(); \
8192+ } \
8193+ __ret; \
8194+ })
8195+
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204+
8205 #endif /* ASM_X86_CMPXCHG_H */
8206diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207index f3444f7..051a196 100644
8208--- a/arch/x86/include/asm/cpufeature.h
8209+++ b/arch/x86/include/asm/cpufeature.h
8210@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214- ".section .altinstr_replacement,\"ax\"\n"
8215+ ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220index 41935fa..3b40db8 100644
8221--- a/arch/x86/include/asm/desc.h
8222+++ b/arch/x86/include/asm/desc.h
8223@@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227+#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235+ desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243-extern gate_desc idt_table[];
8244-
8245-struct gdt_page {
8246- struct desc_struct gdt[GDT_ENTRIES];
8247-} __attribute__((aligned(PAGE_SIZE)));
8248-
8249-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250+extern gate_desc idt_table[256];
8251
8252+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255- return per_cpu(gdt_page, cpu).gdt;
8256+ return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264- gate->a = (seg << 16) | (base & 0xffff);
8265- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266+ gate->gate.offset_low = base;
8267+ gate->gate.seg = seg;
8268+ gate->gate.reserved = 0;
8269+ gate->gate.type = type;
8270+ gate->gate.s = 0;
8271+ gate->gate.dpl = dpl;
8272+ gate->gate.p = 1;
8273+ gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281+ pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283+ pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288+ pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290+ pax_close_kernel();
8291 }
8292
8293 static inline void
8294@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298+ pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300+ pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308+ pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310+ pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318+ pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321+ pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329-static inline void _set_gate(int gate, unsigned type, void *addr,
8330+static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338-static inline void set_intr_gate(unsigned int n, void *addr)
8339+static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347-static inline void set_system_intr_gate(unsigned int n, void *addr)
8348+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354-static inline void set_system_trap_gate(unsigned int n, void *addr)
8355+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361-static inline void set_trap_gate(unsigned int n, void *addr)
8362+static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388+#ifdef CONFIG_X86_32
8389+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390+{
8391+ struct desc_struct d;
8392+
8393+ if (likely(limit))
8394+ limit = (limit - 1UL) >> PAGE_SHIFT;
8395+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397+}
8398+#endif
8399+
8400 #endif /* _ASM_X86_DESC_H */
8401diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402index 278441f..b95a174 100644
8403--- a/arch/x86/include/asm/desc_defs.h
8404+++ b/arch/x86/include/asm/desc_defs.h
8405@@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409+ struct {
8410+ u16 offset_low;
8411+ u16 seg;
8412+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413+ unsigned offset_high: 16;
8414+ } gate;
8415 };
8416 } __attribute__((packed));
8417
8418diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419index 908b969..a1f4eb4 100644
8420--- a/arch/x86/include/asm/e820.h
8421+++ b/arch/x86/include/asm/e820.h
8422@@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426-#define BIOS_BEGIN 0x000a0000
8427+#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432index 5f962df..7289f09 100644
8433--- a/arch/x86/include/asm/elf.h
8434+++ b/arch/x86/include/asm/elf.h
8435@@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439+#ifdef CONFIG_PAX_SEGMEXEC
8440+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441+#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443+#endif
8444+
8445+#ifdef CONFIG_PAX_ASLR
8446+#ifdef CONFIG_X86_32
8447+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448+
8449+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451+#else
8452+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453+
8454+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456+#endif
8457+#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461@@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465- if (vdso_enabled) \
8466- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467- (unsigned long)current->mm->context.vdso); \
8468+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472@@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486-#define arch_randomize_brk arch_randomize_brk
8487-
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492index cc70c1c..d96d011 100644
8493--- a/arch/x86/include/asm/emergency-restart.h
8494+++ b/arch/x86/include/asm/emergency-restart.h
8495@@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499-extern void machine_emergency_restart(void);
8500+extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504index d09bb03..4ea4194 100644
8505--- a/arch/x86/include/asm/futex.h
8506+++ b/arch/x86/include/asm/futex.h
8507@@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511+ typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523+ typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527@@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531- "+m" (*uaddr), "=&r" (tem) \
8532+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566index eb92a6e..b98b2f4 100644
8567--- a/arch/x86/include/asm/hw_irq.h
8568+++ b/arch/x86/include/asm/hw_irq.h
8569@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573-extern atomic_t irq_err_count;
8574-extern atomic_t irq_mis_count;
8575+extern atomic_unchecked_t irq_err_count;
8576+extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581index c9e09ea..73888df 100644
8582--- a/arch/x86/include/asm/i387.h
8583+++ b/arch/x86/include/asm/i387.h
8584@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591+#endif
8592+
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603+#endif
8604+
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612- in L1 during context switch. The best choices are unfortunately
8613- different for UP and SMP */
8614-#ifdef CONFIG_SMP
8615-#define safe_address (__per_cpu_offset[0])
8616-#else
8617-#define safe_address (kstat_cpu(0).cpustat.user)
8618-#endif
8619+ in L1 during context switch. */
8620+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628- __save_init_fpu(me->task);
8629+ __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634index d8e8eef..99f81ae 100644
8635--- a/arch/x86/include/asm/io.h
8636+++ b/arch/x86/include/asm/io.h
8637@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643+{
8644+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645+}
8646+
8647+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648+{
8649+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650+}
8651+
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656index bba3cf8..06bc8da 100644
8657--- a/arch/x86/include/asm/irqflags.h
8658+++ b/arch/x86/include/asm/irqflags.h
8659@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667+
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672index 5478825..839e88c 100644
8673--- a/arch/x86/include/asm/kprobes.h
8674+++ b/arch/x86/include/asm/kprobes.h
8675@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679-#define MAX_STACK_SIZE 64
8680-#define MIN_STACK_SIZE(ADDR) \
8681- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682- THREAD_SIZE - (unsigned long)(ADDR))) \
8683- ? (MAX_STACK_SIZE) \
8684- : (((unsigned long)current_thread_info()) + \
8685- THREAD_SIZE - (unsigned long)(ADDR)))
8686+#define MAX_STACK_SIZE 64UL
8687+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692index b4973f4..7c4d3fc 100644
8693--- a/arch/x86/include/asm/kvm_host.h
8694+++ b/arch/x86/include/asm/kvm_host.h
8695@@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699- atomic_t invlpg_counter;
8700+ atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704@@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708-};
8709+} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714index 9cdae5d..300d20f 100644
8715--- a/arch/x86/include/asm/local.h
8716+++ b/arch/x86/include/asm/local.h
8717@@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721- asm volatile(_ASM_INC "%0"
8722+ asm volatile(_ASM_INC "%0\n"
8723+
8724+#ifdef CONFIG_PAX_REFCOUNT
8725+ "jno 0f\n"
8726+ _ASM_DEC "%0\n"
8727+ "int $4\n0:\n"
8728+ _ASM_EXTABLE(0b, 0b)
8729+#endif
8730+
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736- asm volatile(_ASM_DEC "%0"
8737+ asm volatile(_ASM_DEC "%0\n"
8738+
8739+#ifdef CONFIG_PAX_REFCOUNT
8740+ "jno 0f\n"
8741+ _ASM_INC "%0\n"
8742+ "int $4\n0:\n"
8743+ _ASM_EXTABLE(0b, 0b)
8744+#endif
8745+
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751- asm volatile(_ASM_ADD "%1,%0"
8752+ asm volatile(_ASM_ADD "%1,%0\n"
8753+
8754+#ifdef CONFIG_PAX_REFCOUNT
8755+ "jno 0f\n"
8756+ _ASM_SUB "%1,%0\n"
8757+ "int $4\n0:\n"
8758+ _ASM_EXTABLE(0b, 0b)
8759+#endif
8760+
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767- asm volatile(_ASM_SUB "%1,%0"
8768+ asm volatile(_ASM_SUB "%1,%0\n"
8769+
8770+#ifdef CONFIG_PAX_REFCOUNT
8771+ "jno 0f\n"
8772+ _ASM_ADD "%1,%0\n"
8773+ "int $4\n0:\n"
8774+ _ASM_EXTABLE(0b, 0b)
8775+#endif
8776+
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784- asm volatile(_ASM_SUB "%2,%0; sete %1"
8785+ asm volatile(_ASM_SUB "%2,%0\n"
8786+
8787+#ifdef CONFIG_PAX_REFCOUNT
8788+ "jno 0f\n"
8789+ _ASM_ADD "%2,%0\n"
8790+ "int $4\n0:\n"
8791+ _ASM_EXTABLE(0b, 0b)
8792+#endif
8793+
8794+ "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802- asm volatile(_ASM_DEC "%0; sete %1"
8803+ asm volatile(_ASM_DEC "%0\n"
8804+
8805+#ifdef CONFIG_PAX_REFCOUNT
8806+ "jno 0f\n"
8807+ _ASM_INC "%0\n"
8808+ "int $4\n0:\n"
8809+ _ASM_EXTABLE(0b, 0b)
8810+#endif
8811+
8812+ "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820- asm volatile(_ASM_INC "%0; sete %1"
8821+ asm volatile(_ASM_INC "%0\n"
8822+
8823+#ifdef CONFIG_PAX_REFCOUNT
8824+ "jno 0f\n"
8825+ _ASM_DEC "%0\n"
8826+ "int $4\n0:\n"
8827+ _ASM_EXTABLE(0b, 0b)
8828+#endif
8829+
8830+ "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838- asm volatile(_ASM_ADD "%2,%0; sets %1"
8839+ asm volatile(_ASM_ADD "%2,%0\n"
8840+
8841+#ifdef CONFIG_PAX_REFCOUNT
8842+ "jno 0f\n"
8843+ _ASM_SUB "%2,%0\n"
8844+ "int $4\n0:\n"
8845+ _ASM_EXTABLE(0b, 0b)
8846+#endif
8847+
8848+ "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856- asm volatile(_ASM_XADD "%0, %1;"
8857+ asm volatile(_ASM_XADD "%0, %1\n"
8858+
8859+#ifdef CONFIG_PAX_REFCOUNT
8860+ "jno 0f\n"
8861+ _ASM_MOV "%0,%1\n"
8862+ "int $4\n0:\n"
8863+ _ASM_EXTABLE(0b, 0b)
8864+#endif
8865+
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870index 593e51d..fa69c9a 100644
8871--- a/arch/x86/include/asm/mman.h
8872+++ b/arch/x86/include/asm/mman.h
8873@@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877+#ifdef __KERNEL__
8878+#ifndef __ASSEMBLY__
8879+#ifdef CONFIG_X86_32
8880+#define arch_mmap_check i386_mmap_check
8881+int i386_mmap_check(unsigned long addr, unsigned long len,
8882+ unsigned long flags);
8883+#endif
8884+#endif
8885+#endif
8886+
8887 #endif /* _ASM_X86_MMAN_H */
8888diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889index 5f55e69..e20bfb1 100644
8890--- a/arch/x86/include/asm/mmu.h
8891+++ b/arch/x86/include/asm/mmu.h
8892@@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896- void *ldt;
8897+ struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901@@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905- void *vdso;
8906+ unsigned long vdso;
8907+
8908+#ifdef CONFIG_X86_32
8909+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910+ unsigned long user_cs_base;
8911+ unsigned long user_cs_limit;
8912+
8913+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914+ cpumask_t cpu_user_cs_mask;
8915+#endif
8916+
8917+#endif
8918+#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923index 6902152..399f3a2 100644
8924--- a/arch/x86/include/asm/mmu_context.h
8925+++ b/arch/x86/include/asm/mmu_context.h
8926@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930+
8931+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932+ unsigned int i;
8933+ pgd_t *pgd;
8934+
8935+ pax_open_kernel();
8936+ pgd = get_cpu_pgd(smp_processor_id());
8937+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938+ set_pgd_batched(pgd+i, native_make_pgd(0));
8939+ pax_close_kernel();
8940+#endif
8941+
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950+ int tlbstate = TLBSTATE_OK;
8951+#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956+ tlbstate = percpu_read(cpu_tlbstate.state);
8957+#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964+#ifdef CONFIG_PAX_PER_CPU_PGD
8965+ pax_open_kernel();
8966+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968+ pax_close_kernel();
8969+ load_cr3(get_cpu_pgd(cpu));
8970+#else
8971 load_cr3(next->pgd);
8972+#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980- }
8981+
8982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983+ if (!(__supported_pte_mask & _PAGE_NX)) {
8984+ smp_mb__before_clear_bit();
8985+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986+ smp_mb__after_clear_bit();
8987+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8988+ }
8989+#endif
8990+
8991+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993+ prev->context.user_cs_limit != next->context.user_cs_limit))
8994+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996+ else if (unlikely(tlbstate != TLBSTATE_OK))
8997+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998+#endif
8999+#endif
9000+
9001+ }
9002 else {
9003+
9004+#ifdef CONFIG_PAX_PER_CPU_PGD
9005+ pax_open_kernel();
9006+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008+ pax_close_kernel();
9009+ load_cr3(get_cpu_pgd(cpu));
9010+#endif
9011+
9012+#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020+
9021+#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023+#endif
9024+
9025 load_LDT_nolock(&next->context);
9026+
9027+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028+ if (!(__supported_pte_mask & _PAGE_NX))
9029+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9030+#endif
9031+
9032+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033+#ifdef CONFIG_PAX_PAGEEXEC
9034+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035+#endif
9036+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037+#endif
9038+
9039 }
9040+#endif
9041 }
9042-#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047index 9eae775..c914fea 100644
9048--- a/arch/x86/include/asm/module.h
9049+++ b/arch/x86/include/asm/module.h
9050@@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054+#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058@@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062-#ifdef CONFIG_X86_32
9063-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068+#else
9069+#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072+#ifdef CONFIG_PAX_MEMORY_UDEREF
9073+#define MODULE_PAX_UDEREF "UDEREF "
9074+#else
9075+#define MODULE_PAX_UDEREF ""
9076+#endif
9077+
9078+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079+
9080 #endif /* _ASM_X86_MODULE_H */
9081diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082index 7639dbf..e08a58c 100644
9083--- a/arch/x86/include/asm/page_64_types.h
9084+++ b/arch/x86/include/asm/page_64_types.h
9085@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089-extern unsigned long phys_base;
9090+extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095index a7d2db9..edb023e 100644
9096--- a/arch/x86/include/asm/paravirt.h
9097+++ b/arch/x86/include/asm/paravirt.h
9098@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103+{
9104+ pgdval_t val = native_pgd_val(pgd);
9105+
9106+ if (sizeof(pgdval_t) > sizeof(long))
9107+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108+ val, (u64)val >> 32);
9109+ else
9110+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111+ val);
9112+}
9113+
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121+#ifdef CONFIG_PAX_KERNEXEC
9122+static inline unsigned long pax_open_kernel(void)
9123+{
9124+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125+}
9126+
9127+static inline unsigned long pax_close_kernel(void)
9128+{
9129+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130+}
9131+#else
9132+static inline unsigned long pax_open_kernel(void) { return 0; }
9133+static inline unsigned long pax_close_kernel(void) { return 0; }
9134+#endif
9135+
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139@@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143-#define PARA_INDIRECT(addr) *%cs:addr
9144+#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148@@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152+
9153+#define GET_CR0_INTO_RDI \
9154+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155+ mov %rax,%rdi
9156+
9157+#define SET_RDI_INTO_CR0 \
9158+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159+
9160+#define GET_CR3_INTO_RDI \
9161+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162+ mov %rax,%rdi
9163+
9164+#define SET_RDI_INTO_CR3 \
9165+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166+
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171index 8e8b9a4..f07d725 100644
9172--- a/arch/x86/include/asm/paravirt_types.h
9173+++ b/arch/x86/include/asm/paravirt_types.h
9174@@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178-};
9179+} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186-};
9187+} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193-};
9194+} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198@@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202-};
9203+} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207@@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211-};
9212+} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216@@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224@@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228+
9229+#ifdef CONFIG_PAX_KERNEXEC
9230+ unsigned long (*pax_open_kernel)(void);
9231+ unsigned long (*pax_close_kernel)(void);
9232+#endif
9233+
9234 };
9235
9236 struct arch_spinlock;
9237@@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241-};
9242+} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247index b4389a4..b7ff22c 100644
9248--- a/arch/x86/include/asm/pgalloc.h
9249+++ b/arch/x86/include/asm/pgalloc.h
9250@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255+}
9256+
9257+static inline void pmd_populate_user(struct mm_struct *mm,
9258+ pmd_t *pmd, pte_t *pte)
9259+{
9260+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265index 98391db..8f6984e 100644
9266--- a/arch/x86/include/asm/pgtable-2level.h
9267+++ b/arch/x86/include/asm/pgtable-2level.h
9268@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272+ pax_open_kernel();
9273 *pmdp = pmd;
9274+ pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279index effff47..f9e4035 100644
9280--- a/arch/x86/include/asm/pgtable-3level.h
9281+++ b/arch/x86/include/asm/pgtable-3level.h
9282@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286+ pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288+ pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293+ pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295+ pax_close_kernel();
9296 }
9297
9298 /*
9299diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300index 18601c8..3d716d1 100644
9301--- a/arch/x86/include/asm/pgtable.h
9302+++ b/arch/x86/include/asm/pgtable.h
9303@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315+#define pax_open_kernel() native_pax_open_kernel()
9316+#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321+
9322+#ifdef CONFIG_PAX_KERNEXEC
9323+static inline unsigned long native_pax_open_kernel(void)
9324+{
9325+ unsigned long cr0;
9326+
9327+ preempt_disable();
9328+ barrier();
9329+ cr0 = read_cr0() ^ X86_CR0_WP;
9330+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331+ write_cr0(cr0);
9332+ return cr0 ^ X86_CR0_WP;
9333+}
9334+
9335+static inline unsigned long native_pax_close_kernel(void)
9336+{
9337+ unsigned long cr0;
9338+
9339+ cr0 = read_cr0() ^ X86_CR0_WP;
9340+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341+ write_cr0(cr0);
9342+ barrier();
9343+ preempt_enable_no_resched();
9344+ return cr0 ^ X86_CR0_WP;
9345+}
9346+#else
9347+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349+#endif
9350+
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355+static inline int pte_user(pte_t pte)
9356+{
9357+ return pte_val(pte) & _PAGE_USER;
9358+}
9359+
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367+static inline pte_t pte_mkread(pte_t pte)
9368+{
9369+ return __pte(pte_val(pte) | _PAGE_USER);
9370+}
9371+
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374- return pte_clear_flags(pte, _PAGE_NX);
9375+#ifdef CONFIG_X86_PAE
9376+ if (__supported_pte_mask & _PAGE_NX)
9377+ return pte_clear_flags(pte, _PAGE_NX);
9378+ else
9379+#endif
9380+ return pte_set_flags(pte, _PAGE_USER);
9381+}
9382+
9383+static inline pte_t pte_exprotect(pte_t pte)
9384+{
9385+#ifdef CONFIG_X86_PAE
9386+ if (__supported_pte_mask & _PAGE_NX)
9387+ return pte_set_flags(pte, _PAGE_NX);
9388+ else
9389+#endif
9390+ return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398+
9399+#ifdef CONFIG_PAX_PER_CPU_PGD
9400+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402+{
9403+ return cpu_pgd[cpu];
9404+}
9405+#endif
9406+
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425+
9426+#ifdef CONFIG_PAX_PER_CPU_PGD
9427+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428+#endif
9429+
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437+#ifdef CONFIG_X86_32
9438+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439+#else
9440+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442+
9443+#ifdef CONFIG_PAX_MEMORY_UDEREF
9444+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445+#else
9446+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447+#endif
9448+
9449+#endif
9450+
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461- memcpy(dst, src, count * sizeof(pgd_t));
9462+ pax_open_kernel();
9463+ while (count--)
9464+ *dst++ = *src++;
9465+ pax_close_kernel();
9466 }
9467
9468+#ifdef CONFIG_PAX_PER_CPU_PGD
9469+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470+#endif
9471+
9472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474+#else
9475+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476+#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481index 0c92113..34a77c6 100644
9482--- a/arch/x86/include/asm/pgtable_32.h
9483+++ b/arch/x86/include/asm/pgtable_32.h
9484@@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488-extern pgd_t swapper_pg_dir[1024];
9489-extern pgd_t initial_page_table[1024];
9490-
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499+extern pgd_t initial_page_table[PTRS_PER_PGD];
9500+#ifdef CONFIG_X86_PAE
9501+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502+#endif
9503+
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511+ pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513+ pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517@@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521+#define HAVE_ARCH_UNMAPPED_AREA
9522+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523+
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528index ed5903b..c7fe163 100644
9529--- a/arch/x86/include/asm/pgtable_32_types.h
9530+++ b/arch/x86/include/asm/pgtable_32_types.h
9531@@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535-# define PMD_SIZE (1UL << PMD_SHIFT)
9536+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544+#ifdef CONFIG_PAX_KERNEXEC
9545+#ifndef __ASSEMBLY__
9546+extern unsigned char MODULES_EXEC_VADDR[];
9547+extern unsigned char MODULES_EXEC_END[];
9548+#endif
9549+#include <asm/boot.h>
9550+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552+#else
9553+#define ktla_ktva(addr) (addr)
9554+#define ktva_ktla(addr) (addr)
9555+#endif
9556+
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561index 975f709..107976d 100644
9562--- a/arch/x86/include/asm/pgtable_64.h
9563+++ b/arch/x86/include/asm/pgtable_64.h
9564@@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568+extern pud_t level3_vmalloc_start_pgt[512];
9569+extern pud_t level3_vmalloc_end_pgt[512];
9570+extern pud_t level3_vmemmap_pgt[512];
9571+extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574-extern pmd_t level2_ident_pgt[512];
9575-extern pgd_t init_level4_pgt[];
9576+extern pmd_t level2_ident_pgt[512*2];
9577+extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585+ pax_open_kernel();
9586 *pmdp = pmd;
9587+ pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595+ pax_open_kernel();
9596+ *pgdp = pgd;
9597+ pax_close_kernel();
9598+}
9599+
9600+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601+{
9602 *pgdp = pgd;
9603 }
9604
9605diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606index 766ea16..5b96cb3 100644
9607--- a/arch/x86/include/asm/pgtable_64_types.h
9608+++ b/arch/x86/include/asm/pgtable_64_types.h
9609@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613+#define MODULES_EXEC_VADDR MODULES_VADDR
9614+#define MODULES_EXEC_END MODULES_END
9615+
9616+#define ktla_ktva(addr) (addr)
9617+#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621index 013286a..8b42f4f 100644
9622--- a/arch/x86/include/asm/pgtable_types.h
9623+++ b/arch/x86/include/asm/pgtable_types.h
9624@@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641@@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649@@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653-#else
9654+#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656+#else
9657+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661@@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667+
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671@@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680@@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695+#endif
9696
9697+#if PAGETABLE_LEVELS == 3
9698+#include <asm-generic/pgtable-nopud.h>
9699+#endif
9700+
9701+#if PAGETABLE_LEVELS == 2
9702+#include <asm-generic/pgtable-nopmd.h>
9703+#endif
9704+
9705+#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713-#include <asm-generic/pgtable-nopud.h>
9714-
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722-#include <asm-generic/pgtable-nopmd.h>
9723-
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731-extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736index b650435..eefa566 100644
9737--- a/arch/x86/include/asm/processor.h
9738+++ b/arch/x86/include/asm/processor.h
9739@@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744+extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752+
9753+#ifdef CONFIG_PAX_SEGMEXEC
9754+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756+#else
9757 #define STACK_TOP TASK_SIZE
9758-#define STACK_TOP_MAX STACK_TOP
9759+#endif
9760+
9761+#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782-#define KSTK_TOP(info) \
9783-({ \
9784- unsigned long *__ptr = (unsigned long *)(info); \
9785- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786-})
9787+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811- 0xc0000000 : 0xFFFFe000)
9812+ 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834+#ifdef CONFIG_PAX_SEGMEXEC
9835+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836+#endif
9837+
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842index 3566454..4bdfb8c 100644
9843--- a/arch/x86/include/asm/ptrace.h
9844+++ b/arch/x86/include/asm/ptrace.h
9845@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849- * user_mode_vm(regs) determines whether a register set came from user mode.
9850+ * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856+ * be used.
9857 */
9858-static inline int user_mode(struct pt_regs *regs)
9859+static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864- return !!(regs->cs & 3);
9865+ return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869-static inline int user_mode_vm(struct pt_regs *regs)
9870+static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876- return user_mode(regs);
9877+ return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885+ unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891- return regs->cs == __USER_CS;
9892+ return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901index 92f29706..a79cbbb 100644
9902--- a/arch/x86/include/asm/reboot.h
9903+++ b/arch/x86/include/asm/reboot.h
9904@@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908- void (*restart)(char *cmd);
9909- void (*halt)(void);
9910- void (*power_off)(void);
9911+ void (* __noreturn restart)(char *cmd);
9912+ void (* __noreturn halt)(void);
9913+ void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916- void (*emergency_restart)(void);
9917-};
9918+ void (* __noreturn emergency_restart)(void);
9919+} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925-void machine_real_restart(unsigned int type);
9926+void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931index 2dbe4a7..ce1db00 100644
9932--- a/arch/x86/include/asm/rwsem.h
9933+++ b/arch/x86/include/asm/rwsem.h
9934@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938+
9939+#ifdef CONFIG_PAX_REFCOUNT
9940+ "jno 0f\n"
9941+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9942+ "int $4\n0:\n"
9943+ _ASM_EXTABLE(0b, 0b)
9944+#endif
9945+
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953+
9954+#ifdef CONFIG_PAX_REFCOUNT
9955+ "jno 0f\n"
9956+ "sub %3,%2\n"
9957+ "int $4\n0:\n"
9958+ _ASM_EXTABLE(0b, 0b)
9959+#endif
9960+
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968+
9969+#ifdef CONFIG_PAX_REFCOUNT
9970+ "jno 0f\n"
9971+ "mov %1,(%2)\n"
9972+ "int $4\n0:\n"
9973+ _ASM_EXTABLE(0b, 0b)
9974+#endif
9975+
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983+
9984+#ifdef CONFIG_PAX_REFCOUNT
9985+ "jno 0f\n"
9986+ "mov %1,(%2)\n"
9987+ "int $4\n0:\n"
9988+ _ASM_EXTABLE(0b, 0b)
9989+#endif
9990+
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998+
9999+#ifdef CONFIG_PAX_REFCOUNT
10000+ "jno 0f\n"
10001+ "mov %1,(%2)\n"
10002+ "int $4\n0:\n"
10003+ _ASM_EXTABLE(0b, 0b)
10004+#endif
10005+
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013+
10014+#ifdef CONFIG_PAX_REFCOUNT
10015+ "jno 0f\n"
10016+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017+ "int $4\n0:\n"
10018+ _ASM_EXTABLE(0b, 0b)
10019+#endif
10020+
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030+
10031+#ifdef CONFIG_PAX_REFCOUNT
10032+ "jno 0f\n"
10033+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034+ "int $4\n0:\n"
10035+ _ASM_EXTABLE(0b, 0b)
10036+#endif
10037+
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045- return delta + xadd(&sem->count, delta);
10046+ return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051index 5e64171..f58957e 100644
10052--- a/arch/x86/include/asm/segment.h
10053+++ b/arch/x86/include/asm/segment.h
10054@@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058- * 29 - unused
10059- * 30 - unused
10060+ * 29 - PCI BIOS CS
10061+ * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068+
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072@@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077+
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081@@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087+
10088+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090+
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094@@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103@@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108+
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112@@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121index 73b11bc..d4a3b63 100644
10122--- a/arch/x86/include/asm/smp.h
10123+++ b/arch/x86/include/asm/smp.h
10124@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128-DECLARE_PER_CPU(int, cpu_number);
10129+DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133@@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137-};
10138+} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146-#define raw_smp_processor_id() (percpu_read(cpu_number))
10147-
10148-#define stack_smp_processor_id() \
10149-({ \
10150- struct thread_info *ti; \
10151- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152- ti->cpu; \
10153-})
10154+#define raw_smp_processor_id() (percpu_read(cpu_number))
10155+#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160index 972c260..43ab1fd 100644
10161--- a/arch/x86/include/asm/spinlock.h
10162+++ b/arch/x86/include/asm/spinlock.h
10163@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167+
10168+#ifdef CONFIG_PAX_REFCOUNT
10169+ "jno 0f\n"
10170+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171+ "int $4\n0:\n"
10172+ _ASM_EXTABLE(0b, 0b)
10173+#endif
10174+
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182+
10183+#ifdef CONFIG_PAX_REFCOUNT
10184+ "jno 0f\n"
10185+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186+ "int $4\n0:\n"
10187+ _ASM_EXTABLE(0b, 0b)
10188+#endif
10189+
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199+
10200+#ifdef CONFIG_PAX_REFCOUNT
10201+ "jno 0f\n"
10202+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203+ "int $4\n0:\n"
10204+ _ASM_EXTABLE(0b, 0b)
10205+#endif
10206+
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214+
10215+#ifdef CONFIG_PAX_REFCOUNT
10216+ "jno 0f\n"
10217+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218+ "int $4\n0:\n"
10219+ _ASM_EXTABLE(0b, 0b)
10220+#endif
10221+
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226index 1575177..cb23f52 100644
10227--- a/arch/x86/include/asm/stackprotector.h
10228+++ b/arch/x86/include/asm/stackprotector.h
10229@@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242-#ifdef CONFIG_X86_32
10243+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248index 70bbe39..4ae2bd4 100644
10249--- a/arch/x86/include/asm/stacktrace.h
10250+++ b/arch/x86/include/asm/stacktrace.h
10251@@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255-struct thread_info;
10256+struct task_struct;
10257 struct stacktrace_ops;
10258
10259-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260- unsigned long *stack,
10261- unsigned long bp,
10262- const struct stacktrace_ops *ops,
10263- void *data,
10264- unsigned long *end,
10265- int *graph);
10266+typedef unsigned long walk_stack_t(struct task_struct *task,
10267+ void *stack_start,
10268+ unsigned long *stack,
10269+ unsigned long bp,
10270+ const struct stacktrace_ops *ops,
10271+ void *data,
10272+ unsigned long *end,
10273+ int *graph);
10274
10275-extern unsigned long
10276-print_context_stack(struct thread_info *tinfo,
10277- unsigned long *stack, unsigned long bp,
10278- const struct stacktrace_ops *ops, void *data,
10279- unsigned long *end, int *graph);
10280-
10281-extern unsigned long
10282-print_context_stack_bp(struct thread_info *tinfo,
10283- unsigned long *stack, unsigned long bp,
10284- const struct stacktrace_ops *ops, void *data,
10285- unsigned long *end, int *graph);
10286+extern walk_stack_t print_context_stack;
10287+extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291@@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295- walk_stack_t walk_stack;
10296+ walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301index cb23852..2dde194 100644
10302--- a/arch/x86/include/asm/sys_ia32.h
10303+++ b/arch/x86/include/asm/sys_ia32.h
10304@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314index 2d2f01c..f985723 100644
10315--- a/arch/x86/include/asm/system.h
10316+++ b/arch/x86/include/asm/system.h
10317@@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326@@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331+ [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339- return __limit + 1;
10340+ return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344@@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348-extern unsigned long arch_align_stack(unsigned long sp);
10349+#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355-void stop_this_cpu(void *dummy);
10356+void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361index a1fe5c1..ee326d8 100644
10362--- a/arch/x86/include/asm/thread_info.h
10363+++ b/arch/x86/include/asm/thread_info.h
10364@@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368+#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372@@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376- struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380@@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384-#ifdef CONFIG_X86_32
10385- unsigned long previous_esp; /* ESP of the previous stack in
10386- case of nested (IRQ) stacks
10387- */
10388- __u8 supervisor_stack[0];
10389-#endif
10390+ unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394-#define INIT_THREAD_INFO(tsk) \
10395+#define INIT_THREAD_INFO \
10396 { \
10397- .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401@@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405-#define init_thread_info (init_thread_union.thread_info)
10406+#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410@@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414-#ifdef CONFIG_X86_32
10415-
10416-#define STACK_WARN (THREAD_SIZE/8)
10417-/*
10418- * macros/functions for gaining access to the thread information structure
10419- *
10420- * preempt_count needs to be 1 initially, until the scheduler is functional.
10421- */
10422-#ifndef __ASSEMBLY__
10423-
10424-
10425-/* how to get the current stack pointer from C */
10426-register unsigned long current_stack_pointer asm("esp") __used;
10427-
10428-/* how to get the thread information struct from C */
10429-static inline struct thread_info *current_thread_info(void)
10430-{
10431- return (struct thread_info *)
10432- (current_stack_pointer & ~(THREAD_SIZE - 1));
10433-}
10434-
10435-#else /* !__ASSEMBLY__ */
10436-
10437+#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440- movl $-THREAD_SIZE, reg; \
10441- andl %esp, reg
10442+ mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445-#define GET_THREAD_INFO_WITH_ESP(reg) \
10446- andl $-THREAD_SIZE, reg
10447+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448+#else
10449+/* how to get the thread information struct from C */
10450+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451+
10452+static __always_inline struct thread_info *current_thread_info(void)
10453+{
10454+ return percpu_read_stable(current_tinfo);
10455+}
10456+#endif
10457+
10458+#ifdef CONFIG_X86_32
10459+
10460+#define STACK_WARN (THREAD_SIZE/8)
10461+/*
10462+ * macros/functions for gaining access to the thread information structure
10463+ *
10464+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10465+ */
10466+#ifndef __ASSEMBLY__
10467+
10468+/* how to get the current stack pointer from C */
10469+register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475-#include <asm/percpu.h>
10476-#define KERNEL_STACK_OFFSET (5*8)
10477-
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485-static inline struct thread_info *current_thread_info(void)
10486-{
10487- struct thread_info *ti;
10488- ti = (void *)(percpu_read_stable(kernel_stack) +
10489- KERNEL_STACK_OFFSET - THREAD_SIZE);
10490- return ti;
10491-}
10492-
10493-#else /* !__ASSEMBLY__ */
10494-
10495-/* how to get the thread information struct from ASM */
10496-#define GET_THREAD_INFO(reg) \
10497- movq PER_CPU_VAR(kernel_stack),reg ; \
10498- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499-
10500+/* how to get the current stack pointer from C */
10501+register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509+
10510+#define __HAVE_THREAD_FUNCTIONS
10511+#define task_thread_info(task) (&(task)->tinfo)
10512+#define task_stack_page(task) ((task)->stack)
10513+#define setup_thread_stack(p, org) do {} while (0)
10514+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515+
10516+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517+extern struct task_struct *alloc_task_struct_node(int node);
10518+extern void free_task_struct(struct task_struct *);
10519+
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523index 36361bf..324f262 100644
10524--- a/arch/x86/include/asm/uaccess.h
10525+++ b/arch/x86/include/asm/uaccess.h
10526@@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530+#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538+
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542@@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547+void __set_fs(mm_segment_t x);
10548+void set_fs(mm_segment_t x);
10549+#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551+#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555@@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561+#define access_ok(type, addr, size) \
10562+({ \
10563+ long __size = size; \
10564+ unsigned long __addr = (unsigned long)addr; \
10565+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10566+ unsigned long __end_ao = __addr + __size - 1; \
10567+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569+ while(__addr_ao <= __end_ao) { \
10570+ char __c_ao; \
10571+ __addr_ao += PAGE_SIZE; \
10572+ if (__size > PAGE_SIZE) \
10573+ cond_resched(); \
10574+ if (__get_user(__c_ao, (char __user *)__addr)) \
10575+ break; \
10576+ if (type != VERIFY_WRITE) { \
10577+ __addr = __addr_ao; \
10578+ continue; \
10579+ } \
10580+ if (__put_user(__c_ao, (char __user *)__addr)) \
10581+ break; \
10582+ __addr = __addr_ao; \
10583+ } \
10584+ } \
10585+ __ret_ao; \
10586+})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594-
10595+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596+#define __copyuser_seg "gs;"
10597+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599+#else
10600+#define __copyuser_seg
10601+#define __COPYUSER_SET_ES
10602+#define __COPYUSER_RESTORE_ES
10603+#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607- asm volatile("1: movl %%eax,0(%2)\n" \
10608- "2: movl %%edx,4(%2)\n" \
10609+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618- asm volatile("1: movl %%eax,0(%1)\n" \
10619- "2: movl %%edx,4(%1)\n" \
10620+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629- __pu_val = x; \
10630+ __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634@@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643@@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647- : "=r" (err), ltype(x) \
10648+ : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652@@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661@@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666+ (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672-#define __m(x) (*(struct __large_struct __user *)(x))
10673+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674+#define ____m(x) \
10675+({ \
10676+ unsigned long ____x = (unsigned long)(x); \
10677+ if (____x < PAX_USER_SHADOW_BASE) \
10678+ ____x += PAX_USER_SHADOW_BASE; \
10679+ (void __user *)____x; \
10680+})
10681+#else
10682+#define ____m(x) (x)
10683+#endif
10684+#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715+#define __get_user(x, ptr) get_user((x), (ptr))
10716+#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719+#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728+#define __put_user(x, ptr) put_user((x), (ptr))
10729+#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732+#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741+ (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746index 566e803..b9521e9 100644
10747--- a/arch/x86/include/asm/uaccess_32.h
10748+++ b/arch/x86/include/asm/uaccess_32.h
10749@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753+ if ((long)n < 0)
10754+ return n;
10755+
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763+ if (!__builtin_constant_p(n))
10764+ check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772+
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779+ if ((long)n < 0)
10780+ return n;
10781+
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785@@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789+
10790+ if ((long)n < 0)
10791+ return n;
10792+
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800+ if (!__builtin_constant_p(n))
10801+ check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809+
10810+ if ((long)n < 0)
10811+ return n;
10812+
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816@@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820- return __copy_from_user_ll_nocache_nozero(to, from, n);
10821+ if ((long)n < 0)
10822+ return n;
10823+
10824+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827-unsigned long __must_check copy_to_user(void __user *to,
10828- const void *from, unsigned long n);
10829-unsigned long __must_check _copy_from_user(void *to,
10830- const void __user *from,
10831- unsigned long n);
10832-
10833+extern void copy_to_user_overflow(void)
10834+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10836+#else
10837+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838+#endif
10839+;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847-static inline unsigned long __must_check copy_from_user(void *to,
10848- const void __user *from,
10849- unsigned long n)
10850+/**
10851+ * copy_to_user: - Copy a block of data into user space.
10852+ * @to: Destination address, in user space.
10853+ * @from: Source address, in kernel space.
10854+ * @n: Number of bytes to copy.
10855+ *
10856+ * Context: User context only. This function may sleep.
10857+ *
10858+ * Copy data from kernel space to user space.
10859+ *
10860+ * Returns number of bytes that could not be copied.
10861+ * On success, this will be zero.
10862+ */
10863+static inline unsigned long __must_check
10864+copy_to_user(void __user *to, const void *from, unsigned long n)
10865+{
10866+ int sz = __compiletime_object_size(from);
10867+
10868+ if (unlikely(sz != -1 && sz < n))
10869+ copy_to_user_overflow();
10870+ else if (access_ok(VERIFY_WRITE, to, n))
10871+ n = __copy_to_user(to, from, n);
10872+ return n;
10873+}
10874+
10875+/**
10876+ * copy_from_user: - Copy a block of data from user space.
10877+ * @to: Destination address, in kernel space.
10878+ * @from: Source address, in user space.
10879+ * @n: Number of bytes to copy.
10880+ *
10881+ * Context: User context only. This function may sleep.
10882+ *
10883+ * Copy data from user space to kernel space.
10884+ *
10885+ * Returns number of bytes that could not be copied.
10886+ * On success, this will be zero.
10887+ *
10888+ * If some data could not be copied, this function will pad the copied
10889+ * data to the requested size using zero bytes.
10890+ */
10891+static inline unsigned long __must_check
10892+copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896- if (likely(sz == -1 || sz >= n))
10897- n = _copy_from_user(to, from, n);
10898- else
10899+ if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901-
10902+ else if (access_ok(VERIFY_READ, from, n))
10903+ n = __copy_from_user(to, from, n);
10904+ else if ((long)n > 0) {
10905+ if (!__builtin_constant_p(n))
10906+ check_object_size(to, n, false);
10907+ memset(to, 0, n);
10908+ }
10909 return n;
10910 }
10911
10912diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913index 1c66d30..23ab77d 100644
10914--- a/arch/x86/include/asm/uaccess_64.h
10915+++ b/arch/x86/include/asm/uaccess_64.h
10916@@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920+#include <asm/pgtable.h>
10921+
10922+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926@@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930-copy_user_generic_string(void *to, const void *from, unsigned len);
10931+copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937-copy_user_generic(void *to, const void *from, unsigned len)
10938+copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942@@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 return ret;
10944 }
10945
10946+static __always_inline __must_check unsigned long
10947+__copy_to_user(void __user *to, const void *from, unsigned long len);
10948+static __always_inline __must_check unsigned long
10949+__copy_from_user(void *to, const void __user *from, unsigned long len);
10950 __must_check unsigned long
10951-_copy_to_user(void __user *to, const void *from, unsigned len);
10952-__must_check unsigned long
10953-_copy_from_user(void *to, const void __user *from, unsigned len);
10954-__must_check unsigned long
10955-copy_in_user(void __user *to, const void __user *from, unsigned len);
10956+copy_in_user(void __user *to, const void __user *from, unsigned long len);
10957
10958 static inline unsigned long __must_check copy_from_user(void *to,
10959 const void __user *from,
10960 unsigned long n)
10961 {
10962- int sz = __compiletime_object_size(to);
10963-
10964 might_fault();
10965- if (likely(sz == -1 || sz >= n))
10966- n = _copy_from_user(to, from, n);
10967-#ifdef CONFIG_DEBUG_VM
10968- else
10969- WARN(1, "Buffer overflow detected!\n");
10970-#endif
10971+
10972+ if (access_ok(VERIFY_READ, from, n))
10973+ n = __copy_from_user(to, from, n);
10974+ else if (n < INT_MAX) {
10975+ if (!__builtin_constant_p(n))
10976+ check_object_size(to, n, false);
10977+ memset(to, 0, n);
10978+ }
10979 return n;
10980 }
10981
10982 static __always_inline __must_check
10983-int copy_to_user(void __user *dst, const void *src, unsigned size)
10984+int copy_to_user(void __user *dst, const void *src, unsigned long size)
10985 {
10986 might_fault();
10987
10988- return _copy_to_user(dst, src, size);
10989+ if (access_ok(VERIFY_WRITE, dst, size))
10990+ size = __copy_to_user(dst, src, size);
10991+ return size;
10992 }
10993
10994 static __always_inline __must_check
10995-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10996+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10997 {
10998- int ret = 0;
10999+ int sz = __compiletime_object_size(dst);
11000+ unsigned ret = 0;
11001
11002 might_fault();
11003- if (!__builtin_constant_p(size))
11004- return copy_user_generic(dst, (__force void *)src, size);
11005+
11006+ if (size > INT_MAX)
11007+ return size;
11008+
11009+#ifdef CONFIG_PAX_MEMORY_UDEREF
11010+ if (!__access_ok(VERIFY_READ, src, size))
11011+ return size;
11012+#endif
11013+
11014+ if (unlikely(sz != -1 && sz < size)) {
11015+#ifdef CONFIG_DEBUG_VM
11016+ WARN(1, "Buffer overflow detected!\n");
11017+#endif
11018+ return size;
11019+ }
11020+
11021+ if (!__builtin_constant_p(size)) {
11022+ check_object_size(dst, size, false);
11023+
11024+#ifdef CONFIG_PAX_MEMORY_UDEREF
11025+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11026+ src += PAX_USER_SHADOW_BASE;
11027+#endif
11028+
11029+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11030+ }
11031 switch (size) {
11032- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11033+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11034 ret, "b", "b", "=q", 1);
11035 return ret;
11036- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11037+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11038 ret, "w", "w", "=r", 2);
11039 return ret;
11040- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11041+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11042 ret, "l", "k", "=r", 4);
11043 return ret;
11044- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11045+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11046 ret, "q", "", "=r", 8);
11047 return ret;
11048 case 10:
11049- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 10);
11052 if (unlikely(ret))
11053 return ret;
11054 __get_user_asm(*(u16 *)(8 + (char *)dst),
11055- (u16 __user *)(8 + (char __user *)src),
11056+ (const u16 __user *)(8 + (const char __user *)src),
11057 ret, "w", "w", "=r", 2);
11058 return ret;
11059 case 16:
11060- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11061+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11062 ret, "q", "", "=r", 16);
11063 if (unlikely(ret))
11064 return ret;
11065 __get_user_asm(*(u64 *)(8 + (char *)dst),
11066- (u64 __user *)(8 + (char __user *)src),
11067+ (const u64 __user *)(8 + (const char __user *)src),
11068 ret, "q", "", "=r", 8);
11069 return ret;
11070 default:
11071- return copy_user_generic(dst, (__force void *)src, size);
11072+
11073+#ifdef CONFIG_PAX_MEMORY_UDEREF
11074+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11075+ src += PAX_USER_SHADOW_BASE;
11076+#endif
11077+
11078+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11079 }
11080 }
11081
11082 static __always_inline __must_check
11083-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11084+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11085 {
11086- int ret = 0;
11087+ int sz = __compiletime_object_size(src);
11088+ unsigned ret = 0;
11089
11090 might_fault();
11091- if (!__builtin_constant_p(size))
11092- return copy_user_generic((__force void *)dst, src, size);
11093+
11094+ if (size > INT_MAX)
11095+ return size;
11096+
11097+#ifdef CONFIG_PAX_MEMORY_UDEREF
11098+ if (!__access_ok(VERIFY_WRITE, dst, size))
11099+ return size;
11100+#endif
11101+
11102+ if (unlikely(sz != -1 && sz < size)) {
11103+#ifdef CONFIG_DEBUG_VM
11104+ WARN(1, "Buffer overflow detected!\n");
11105+#endif
11106+ return size;
11107+ }
11108+
11109+ if (!__builtin_constant_p(size)) {
11110+ check_object_size(src, size, true);
11111+
11112+#ifdef CONFIG_PAX_MEMORY_UDEREF
11113+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11114+ dst += PAX_USER_SHADOW_BASE;
11115+#endif
11116+
11117+ return copy_user_generic((__force_kernel void *)dst, src, size);
11118+ }
11119 switch (size) {
11120- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11121+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11122 ret, "b", "b", "iq", 1);
11123 return ret;
11124- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11125+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11126 ret, "w", "w", "ir", 2);
11127 return ret;
11128- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11129+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11130 ret, "l", "k", "ir", 4);
11131 return ret;
11132- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11133+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11134 ret, "q", "", "er", 8);
11135 return ret;
11136 case 10:
11137- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 10);
11140 if (unlikely(ret))
11141 return ret;
11142 asm("":::"memory");
11143- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11144+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11145 ret, "w", "w", "ir", 2);
11146 return ret;
11147 case 16:
11148- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11149+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11150 ret, "q", "", "er", 16);
11151 if (unlikely(ret))
11152 return ret;
11153 asm("":::"memory");
11154- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11155+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11156 ret, "q", "", "er", 8);
11157 return ret;
11158 default:
11159- return copy_user_generic((__force void *)dst, src, size);
11160+
11161+#ifdef CONFIG_PAX_MEMORY_UDEREF
11162+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11163+ dst += PAX_USER_SHADOW_BASE;
11164+#endif
11165+
11166+ return copy_user_generic((__force_kernel void *)dst, src, size);
11167 }
11168 }
11169
11170 static __always_inline __must_check
11171-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11172+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11173 {
11174- int ret = 0;
11175+ unsigned ret = 0;
11176
11177 might_fault();
11178- if (!__builtin_constant_p(size))
11179- return copy_user_generic((__force void *)dst,
11180- (__force void *)src, size);
11181+
11182+ if (size > INT_MAX)
11183+ return size;
11184+
11185+#ifdef CONFIG_PAX_MEMORY_UDEREF
11186+ if (!__access_ok(VERIFY_READ, src, size))
11187+ return size;
11188+ if (!__access_ok(VERIFY_WRITE, dst, size))
11189+ return size;
11190+#endif
11191+
11192+ if (!__builtin_constant_p(size)) {
11193+
11194+#ifdef CONFIG_PAX_MEMORY_UDEREF
11195+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11196+ src += PAX_USER_SHADOW_BASE;
11197+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11198+ dst += PAX_USER_SHADOW_BASE;
11199+#endif
11200+
11201+ return copy_user_generic((__force_kernel void *)dst,
11202+ (__force_kernel const void *)src, size);
11203+ }
11204 switch (size) {
11205 case 1: {
11206 u8 tmp;
11207- __get_user_asm(tmp, (u8 __user *)src,
11208+ __get_user_asm(tmp, (const u8 __user *)src,
11209 ret, "b", "b", "=q", 1);
11210 if (likely(!ret))
11211 __put_user_asm(tmp, (u8 __user *)dst,
11212@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11213 }
11214 case 2: {
11215 u16 tmp;
11216- __get_user_asm(tmp, (u16 __user *)src,
11217+ __get_user_asm(tmp, (const u16 __user *)src,
11218 ret, "w", "w", "=r", 2);
11219 if (likely(!ret))
11220 __put_user_asm(tmp, (u16 __user *)dst,
11221@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11222
11223 case 4: {
11224 u32 tmp;
11225- __get_user_asm(tmp, (u32 __user *)src,
11226+ __get_user_asm(tmp, (const u32 __user *)src,
11227 ret, "l", "k", "=r", 4);
11228 if (likely(!ret))
11229 __put_user_asm(tmp, (u32 __user *)dst,
11230@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11231 }
11232 case 8: {
11233 u64 tmp;
11234- __get_user_asm(tmp, (u64 __user *)src,
11235+ __get_user_asm(tmp, (const u64 __user *)src,
11236 ret, "q", "", "=r", 8);
11237 if (likely(!ret))
11238 __put_user_asm(tmp, (u64 __user *)dst,
11239@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11240 return ret;
11241 }
11242 default:
11243- return copy_user_generic((__force void *)dst,
11244- (__force void *)src, size);
11245+
11246+#ifdef CONFIG_PAX_MEMORY_UDEREF
11247+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11248+ src += PAX_USER_SHADOW_BASE;
11249+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11250+ dst += PAX_USER_SHADOW_BASE;
11251+#endif
11252+
11253+ return copy_user_generic((__force_kernel void *)dst,
11254+ (__force_kernel const void *)src, size);
11255 }
11256 }
11257
11258@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11259 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11260
11261 static __must_check __always_inline int
11262-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11263+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11264 {
11265- return copy_user_generic(dst, (__force const void *)src, size);
11266+ if (size > INT_MAX)
11267+ return size;
11268+
11269+#ifdef CONFIG_PAX_MEMORY_UDEREF
11270+ if (!__access_ok(VERIFY_READ, src, size))
11271+ return size;
11272+
11273+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11274+ src += PAX_USER_SHADOW_BASE;
11275+#endif
11276+
11277+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11278 }
11279
11280-static __must_check __always_inline int
11281-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11282+static __must_check __always_inline unsigned long
11283+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11284 {
11285- return copy_user_generic((__force void *)dst, src, size);
11286+ if (size > INT_MAX)
11287+ return size;
11288+
11289+#ifdef CONFIG_PAX_MEMORY_UDEREF
11290+ if (!__access_ok(VERIFY_WRITE, dst, size))
11291+ return size;
11292+
11293+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11294+ dst += PAX_USER_SHADOW_BASE;
11295+#endif
11296+
11297+ return copy_user_generic((__force_kernel void *)dst, src, size);
11298 }
11299
11300-extern long __copy_user_nocache(void *dst, const void __user *src,
11301- unsigned size, int zerorest);
11302+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11303+ unsigned long size, int zerorest);
11304
11305-static inline int
11306-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11307+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11308 {
11309 might_sleep();
11310+
11311+ if (size > INT_MAX)
11312+ return size;
11313+
11314+#ifdef CONFIG_PAX_MEMORY_UDEREF
11315+ if (!__access_ok(VERIFY_READ, src, size))
11316+ return size;
11317+#endif
11318+
11319 return __copy_user_nocache(dst, src, size, 1);
11320 }
11321
11322-static inline int
11323-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11324- unsigned size)
11325+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11326+ unsigned long size)
11327 {
11328+ if (size > INT_MAX)
11329+ return size;
11330+
11331+#ifdef CONFIG_PAX_MEMORY_UDEREF
11332+ if (!__access_ok(VERIFY_READ, src, size))
11333+ return size;
11334+#endif
11335+
11336 return __copy_user_nocache(dst, src, size, 0);
11337 }
11338
11339-unsigned long
11340-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11341+extern unsigned long
11342+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11343
11344 #endif /* _ASM_X86_UACCESS_64_H */
11345diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11346index bb05228..d763d5b 100644
11347--- a/arch/x86/include/asm/vdso.h
11348+++ b/arch/x86/include/asm/vdso.h
11349@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11350 #define VDSO32_SYMBOL(base, name) \
11351 ({ \
11352 extern const char VDSO32_##name[]; \
11353- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11354+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11355 })
11356 #endif
11357
11358diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11359index 1971e65..1e3559b 100644
11360--- a/arch/x86/include/asm/x86_init.h
11361+++ b/arch/x86/include/asm/x86_init.h
11362@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11363 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11364 void (*find_smp_config)(void);
11365 void (*get_smp_config)(unsigned int early);
11366-};
11367+} __no_const;
11368
11369 /**
11370 * struct x86_init_resources - platform specific resource related ops
11371@@ -42,7 +42,7 @@ struct x86_init_resources {
11372 void (*probe_roms)(void);
11373 void (*reserve_resources)(void);
11374 char *(*memory_setup)(void);
11375-};
11376+} __no_const;
11377
11378 /**
11379 * struct x86_init_irqs - platform specific interrupt setup
11380@@ -55,7 +55,7 @@ struct x86_init_irqs {
11381 void (*pre_vector_init)(void);
11382 void (*intr_init)(void);
11383 void (*trap_init)(void);
11384-};
11385+} __no_const;
11386
11387 /**
11388 * struct x86_init_oem - oem platform specific customizing functions
11389@@ -65,7 +65,7 @@ struct x86_init_irqs {
11390 struct x86_init_oem {
11391 void (*arch_setup)(void);
11392 void (*banner)(void);
11393-};
11394+} __no_const;
11395
11396 /**
11397 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11398@@ -76,7 +76,7 @@ struct x86_init_oem {
11399 */
11400 struct x86_init_mapping {
11401 void (*pagetable_reserve)(u64 start, u64 end);
11402-};
11403+} __no_const;
11404
11405 /**
11406 * struct x86_init_paging - platform specific paging functions
11407@@ -86,7 +86,7 @@ struct x86_init_mapping {
11408 struct x86_init_paging {
11409 void (*pagetable_setup_start)(pgd_t *base);
11410 void (*pagetable_setup_done)(pgd_t *base);
11411-};
11412+} __no_const;
11413
11414 /**
11415 * struct x86_init_timers - platform specific timer setup
11416@@ -101,7 +101,7 @@ struct x86_init_timers {
11417 void (*tsc_pre_init)(void);
11418 void (*timer_init)(void);
11419 void (*wallclock_init)(void);
11420-};
11421+} __no_const;
11422
11423 /**
11424 * struct x86_init_iommu - platform specific iommu setup
11425@@ -109,7 +109,7 @@ struct x86_init_timers {
11426 */
11427 struct x86_init_iommu {
11428 int (*iommu_init)(void);
11429-};
11430+} __no_const;
11431
11432 /**
11433 * struct x86_init_pci - platform specific pci init functions
11434@@ -123,7 +123,7 @@ struct x86_init_pci {
11435 int (*init)(void);
11436 void (*init_irq)(void);
11437 void (*fixup_irqs)(void);
11438-};
11439+} __no_const;
11440
11441 /**
11442 * struct x86_init_ops - functions for platform specific setup
11443@@ -139,7 +139,7 @@ struct x86_init_ops {
11444 struct x86_init_timers timers;
11445 struct x86_init_iommu iommu;
11446 struct x86_init_pci pci;
11447-};
11448+} __no_const;
11449
11450 /**
11451 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11452@@ -147,7 +147,7 @@ struct x86_init_ops {
11453 */
11454 struct x86_cpuinit_ops {
11455 void (*setup_percpu_clockev)(void);
11456-};
11457+} __no_const;
11458
11459 /**
11460 * struct x86_platform_ops - platform specific runtime functions
11461@@ -169,7 +169,7 @@ struct x86_platform_ops {
11462 void (*nmi_init)(void);
11463 unsigned char (*get_nmi_reason)(void);
11464 int (*i8042_detect)(void);
11465-};
11466+} __no_const;
11467
11468 struct pci_dev;
11469
11470@@ -177,7 +177,7 @@ struct x86_msi_ops {
11471 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11472 void (*teardown_msi_irq)(unsigned int irq);
11473 void (*teardown_msi_irqs)(struct pci_dev *dev);
11474-};
11475+} __no_const;
11476
11477 extern struct x86_init_ops x86_init;
11478 extern struct x86_cpuinit_ops x86_cpuinit;
11479diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11480index c6ce245..ffbdab7 100644
11481--- a/arch/x86/include/asm/xsave.h
11482+++ b/arch/x86/include/asm/xsave.h
11483@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11484 {
11485 int err;
11486
11487+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11488+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11489+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11490+#endif
11491+
11492 /*
11493 * Clear the xsave header first, so that reserved fields are
11494 * initialized to zero.
11495@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11496 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11497 {
11498 int err;
11499- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11500+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11501 u32 lmask = mask;
11502 u32 hmask = mask >> 32;
11503
11504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11505+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11506+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11507+#endif
11508+
11509 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11510 "2:\n"
11511 ".section .fixup,\"ax\"\n"
11512diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11513index 6a564ac..9b1340c 100644
11514--- a/arch/x86/kernel/acpi/realmode/Makefile
11515+++ b/arch/x86/kernel/acpi/realmode/Makefile
11516@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11517 $(call cc-option, -fno-stack-protector) \
11518 $(call cc-option, -mpreferred-stack-boundary=2)
11519 KBUILD_CFLAGS += $(call cc-option, -m32)
11520+ifdef CONSTIFY_PLUGIN
11521+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11522+endif
11523 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11524 GCOV_PROFILE := n
11525
11526diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11527index b4fd836..4358fe3 100644
11528--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11529+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11530@@ -108,6 +108,9 @@ wakeup_code:
11531 /* Do any other stuff... */
11532
11533 #ifndef CONFIG_64BIT
11534+ /* Recheck NX bit overrides (64bit path does this in trampoline */
11535+ call verify_cpu
11536+
11537 /* This could also be done in C code... */
11538 movl pmode_cr3, %eax
11539 movl %eax, %cr3
11540@@ -131,6 +134,7 @@ wakeup_code:
11541 movl pmode_cr0, %eax
11542 movl %eax, %cr0
11543 jmp pmode_return
11544+# include "../../verify_cpu.S"
11545 #else
11546 pushw $0
11547 pushw trampoline_segment
11548diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11549index 103b6ab..2004d0a 100644
11550--- a/arch/x86/kernel/acpi/sleep.c
11551+++ b/arch/x86/kernel/acpi/sleep.c
11552@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11553 header->trampoline_segment = trampoline_address() >> 4;
11554 #ifdef CONFIG_SMP
11555 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11556+
11557+ pax_open_kernel();
11558 early_gdt_descr.address =
11559 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11560+ pax_close_kernel();
11561+
11562 initial_gs = per_cpu_offset(smp_processor_id());
11563 #endif
11564 initial_code = (unsigned long)wakeup_long64;
11565diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11566index 13ab720..95d5442 100644
11567--- a/arch/x86/kernel/acpi/wakeup_32.S
11568+++ b/arch/x86/kernel/acpi/wakeup_32.S
11569@@ -30,13 +30,11 @@ wakeup_pmode_return:
11570 # and restore the stack ... but you need gdt for this to work
11571 movl saved_context_esp, %esp
11572
11573- movl %cs:saved_magic, %eax
11574- cmpl $0x12345678, %eax
11575+ cmpl $0x12345678, saved_magic
11576 jne bogus_magic
11577
11578 # jump to place where we left off
11579- movl saved_eip, %eax
11580- jmp *%eax
11581+ jmp *(saved_eip)
11582
11583 bogus_magic:
11584 jmp bogus_magic
11585diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11586index 1f84794..e23f862 100644
11587--- a/arch/x86/kernel/alternative.c
11588+++ b/arch/x86/kernel/alternative.c
11589@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11590 */
11591 for (a = start; a < end; a++) {
11592 instr = (u8 *)&a->instr_offset + a->instr_offset;
11593+
11594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11595+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11596+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11597+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11598+#endif
11599+
11600 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11601 BUG_ON(a->replacementlen > a->instrlen);
11602 BUG_ON(a->instrlen > sizeof(insnbuf));
11603@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11604 for (poff = start; poff < end; poff++) {
11605 u8 *ptr = (u8 *)poff + *poff;
11606
11607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11608+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11609+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11610+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11611+#endif
11612+
11613 if (!*poff || ptr < text || ptr >= text_end)
11614 continue;
11615 /* turn DS segment override prefix into lock prefix */
11616- if (*ptr == 0x3e)
11617+ if (*ktla_ktva(ptr) == 0x3e)
11618 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11619 };
11620 mutex_unlock(&text_mutex);
11621@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11622 for (poff = start; poff < end; poff++) {
11623 u8 *ptr = (u8 *)poff + *poff;
11624
11625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11626+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11627+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11628+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11629+#endif
11630+
11631 if (!*poff || ptr < text || ptr >= text_end)
11632 continue;
11633 /* turn lock prefix into DS segment override prefix */
11634- if (*ptr == 0xf0)
11635+ if (*ktla_ktva(ptr) == 0xf0)
11636 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11637 };
11638 mutex_unlock(&text_mutex);
11639@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11640
11641 BUG_ON(p->len > MAX_PATCH_LEN);
11642 /* prep the buffer with the original instructions */
11643- memcpy(insnbuf, p->instr, p->len);
11644+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11645 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11646 (unsigned long)p->instr, p->len);
11647
11648@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11649 if (smp_alt_once)
11650 free_init_pages("SMP alternatives",
11651 (unsigned long)__smp_locks,
11652- (unsigned long)__smp_locks_end);
11653+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11654
11655 restart_nmi();
11656 }
11657@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11658 * instructions. And on the local CPU you need to be protected again NMI or MCE
11659 * handlers seeing an inconsistent instruction while you patch.
11660 */
11661-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11662+void *__kprobes text_poke_early(void *addr, const void *opcode,
11663 size_t len)
11664 {
11665 unsigned long flags;
11666 local_irq_save(flags);
11667- memcpy(addr, opcode, len);
11668+
11669+ pax_open_kernel();
11670+ memcpy(ktla_ktva(addr), opcode, len);
11671 sync_core();
11672+ pax_close_kernel();
11673+
11674 local_irq_restore(flags);
11675 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11676 that causes hangs on some VIA CPUs. */
11677@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11678 */
11679 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11680 {
11681- unsigned long flags;
11682- char *vaddr;
11683+ unsigned char *vaddr = ktla_ktva(addr);
11684 struct page *pages[2];
11685- int i;
11686+ size_t i;
11687
11688 if (!core_kernel_text((unsigned long)addr)) {
11689- pages[0] = vmalloc_to_page(addr);
11690- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11691+ pages[0] = vmalloc_to_page(vaddr);
11692+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11693 } else {
11694- pages[0] = virt_to_page(addr);
11695+ pages[0] = virt_to_page(vaddr);
11696 WARN_ON(!PageReserved(pages[0]));
11697- pages[1] = virt_to_page(addr + PAGE_SIZE);
11698+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11699 }
11700 BUG_ON(!pages[0]);
11701- local_irq_save(flags);
11702- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11703- if (pages[1])
11704- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11705- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11706- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11707- clear_fixmap(FIX_TEXT_POKE0);
11708- if (pages[1])
11709- clear_fixmap(FIX_TEXT_POKE1);
11710- local_flush_tlb();
11711- sync_core();
11712- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11713- that causes hangs on some VIA CPUs. */
11714+ text_poke_early(addr, opcode, len);
11715 for (i = 0; i < len; i++)
11716- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11717- local_irq_restore(flags);
11718+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11719 return addr;
11720 }
11721
11722diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11723index f98d84c..e402a69 100644
11724--- a/arch/x86/kernel/apic/apic.c
11725+++ b/arch/x86/kernel/apic/apic.c
11726@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11727 /*
11728 * Debug level, exported for io_apic.c
11729 */
11730-unsigned int apic_verbosity;
11731+int apic_verbosity;
11732
11733 int pic_mode;
11734
11735@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11736 apic_write(APIC_ESR, 0);
11737 v1 = apic_read(APIC_ESR);
11738 ack_APIC_irq();
11739- atomic_inc(&irq_err_count);
11740+ atomic_inc_unchecked(&irq_err_count);
11741
11742 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11743 smp_processor_id(), v0 , v1);
11744diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11745index 6d939d7..0697fcc 100644
11746--- a/arch/x86/kernel/apic/io_apic.c
11747+++ b/arch/x86/kernel/apic/io_apic.c
11748@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11749 }
11750 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11751
11752-void lock_vector_lock(void)
11753+void lock_vector_lock(void) __acquires(vector_lock)
11754 {
11755 /* Used to the online set of cpus does not change
11756 * during assign_irq_vector.
11757@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11758 raw_spin_lock(&vector_lock);
11759 }
11760
11761-void unlock_vector_lock(void)
11762+void unlock_vector_lock(void) __releases(vector_lock)
11763 {
11764 raw_spin_unlock(&vector_lock);
11765 }
11766@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11767 ack_APIC_irq();
11768 }
11769
11770-atomic_t irq_mis_count;
11771+atomic_unchecked_t irq_mis_count;
11772
11773 static void ack_apic_level(struct irq_data *data)
11774 {
11775@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11776 * at the cpu.
11777 */
11778 if (!(v & (1 << (i & 0x1f)))) {
11779- atomic_inc(&irq_mis_count);
11780+ atomic_inc_unchecked(&irq_mis_count);
11781
11782 eoi_ioapic_irq(irq, cfg);
11783 }
11784diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11785index a46bd38..6b906d7 100644
11786--- a/arch/x86/kernel/apm_32.c
11787+++ b/arch/x86/kernel/apm_32.c
11788@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11789 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11790 * even though they are called in protected mode.
11791 */
11792-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11793+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11794 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11795
11796 static const char driver_version[] = "1.16ac"; /* no spaces */
11797@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11798 BUG_ON(cpu != 0);
11799 gdt = get_cpu_gdt_table(cpu);
11800 save_desc_40 = gdt[0x40 / 8];
11801+
11802+ pax_open_kernel();
11803 gdt[0x40 / 8] = bad_bios_desc;
11804+ pax_close_kernel();
11805
11806 apm_irq_save(flags);
11807 APM_DO_SAVE_SEGS;
11808@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11809 &call->esi);
11810 APM_DO_RESTORE_SEGS;
11811 apm_irq_restore(flags);
11812+
11813+ pax_open_kernel();
11814 gdt[0x40 / 8] = save_desc_40;
11815+ pax_close_kernel();
11816+
11817 put_cpu();
11818
11819 return call->eax & 0xff;
11820@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11821 BUG_ON(cpu != 0);
11822 gdt = get_cpu_gdt_table(cpu);
11823 save_desc_40 = gdt[0x40 / 8];
11824+
11825+ pax_open_kernel();
11826 gdt[0x40 / 8] = bad_bios_desc;
11827+ pax_close_kernel();
11828
11829 apm_irq_save(flags);
11830 APM_DO_SAVE_SEGS;
11831@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11832 &call->eax);
11833 APM_DO_RESTORE_SEGS;
11834 apm_irq_restore(flags);
11835+
11836+ pax_open_kernel();
11837 gdt[0x40 / 8] = save_desc_40;
11838+ pax_close_kernel();
11839+
11840 put_cpu();
11841 return error;
11842 }
11843@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11844 * code to that CPU.
11845 */
11846 gdt = get_cpu_gdt_table(0);
11847+
11848+ pax_open_kernel();
11849 set_desc_base(&gdt[APM_CS >> 3],
11850 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11851 set_desc_base(&gdt[APM_CS_16 >> 3],
11852 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11853 set_desc_base(&gdt[APM_DS >> 3],
11854 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11855+ pax_close_kernel();
11856
11857 proc_create("apm", 0, NULL, &apm_file_ops);
11858
11859diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11860index 4f13faf..87db5d2 100644
11861--- a/arch/x86/kernel/asm-offsets.c
11862+++ b/arch/x86/kernel/asm-offsets.c
11863@@ -33,6 +33,8 @@ void common(void) {
11864 OFFSET(TI_status, thread_info, status);
11865 OFFSET(TI_addr_limit, thread_info, addr_limit);
11866 OFFSET(TI_preempt_count, thread_info, preempt_count);
11867+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11868+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11869
11870 BLANK();
11871 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11872@@ -53,8 +55,26 @@ void common(void) {
11873 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11874 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11875 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11876+
11877+#ifdef CONFIG_PAX_KERNEXEC
11878+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11879 #endif
11880
11881+#ifdef CONFIG_PAX_MEMORY_UDEREF
11882+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11883+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11884+#ifdef CONFIG_X86_64
11885+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11886+#endif
11887+#endif
11888+
11889+#endif
11890+
11891+ BLANK();
11892+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11893+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11894+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11895+
11896 #ifdef CONFIG_XEN
11897 BLANK();
11898 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11899diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11900index e72a119..6e2955d 100644
11901--- a/arch/x86/kernel/asm-offsets_64.c
11902+++ b/arch/x86/kernel/asm-offsets_64.c
11903@@ -69,6 +69,7 @@ int main(void)
11904 BLANK();
11905 #undef ENTRY
11906
11907+ DEFINE(TSS_size, sizeof(struct tss_struct));
11908 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11909 BLANK();
11910
11911diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11912index 25f24dc..4094a7f 100644
11913--- a/arch/x86/kernel/cpu/Makefile
11914+++ b/arch/x86/kernel/cpu/Makefile
11915@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11916 CFLAGS_REMOVE_perf_event.o = -pg
11917 endif
11918
11919-# Make sure load_percpu_segment has no stackprotector
11920-nostackp := $(call cc-option, -fno-stack-protector)
11921-CFLAGS_common.o := $(nostackp)
11922-
11923 obj-y := intel_cacheinfo.o scattered.o topology.o
11924 obj-y += proc.o capflags.o powerflags.o common.o
11925 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11926diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11927index 0bab2b1..d0a1bf8 100644
11928--- a/arch/x86/kernel/cpu/amd.c
11929+++ b/arch/x86/kernel/cpu/amd.c
11930@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11931 unsigned int size)
11932 {
11933 /* AMD errata T13 (order #21922) */
11934- if ((c->x86 == 6)) {
11935+ if (c->x86 == 6) {
11936 /* Duron Rev A0 */
11937 if (c->x86_model == 3 && c->x86_mask == 0)
11938 size = 64;
11939diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11940index aa003b1..47ea638 100644
11941--- a/arch/x86/kernel/cpu/common.c
11942+++ b/arch/x86/kernel/cpu/common.c
11943@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11944
11945 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11946
11947-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11948-#ifdef CONFIG_X86_64
11949- /*
11950- * We need valid kernel segments for data and code in long mode too
11951- * IRET will check the segment types kkeil 2000/10/28
11952- * Also sysret mandates a special GDT layout
11953- *
11954- * TLS descriptors are currently at a different place compared to i386.
11955- * Hopefully nobody expects them at a fixed place (Wine?)
11956- */
11957- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11958- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11959- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11960- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11961- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11962- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11963-#else
11964- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11965- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11966- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11967- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11968- /*
11969- * Segments used for calling PnP BIOS have byte granularity.
11970- * They code segments and data segments have fixed 64k limits,
11971- * the transfer segment sizes are set at run time.
11972- */
11973- /* 32-bit code */
11974- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11975- /* 16-bit code */
11976- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11977- /* 16-bit data */
11978- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11979- /* 16-bit data */
11980- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11981- /* 16-bit data */
11982- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11983- /*
11984- * The APM segments have byte granularity and their bases
11985- * are set at run time. All have 64k limits.
11986- */
11987- /* 32-bit code */
11988- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11989- /* 16-bit code */
11990- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11991- /* data */
11992- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11993-
11994- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11995- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11996- GDT_STACK_CANARY_INIT
11997-#endif
11998-} };
11999-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12000-
12001 static int __init x86_xsave_setup(char *s)
12002 {
12003 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12004@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12005 {
12006 struct desc_ptr gdt_descr;
12007
12008- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12009+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12010 gdt_descr.size = GDT_SIZE - 1;
12011 load_gdt(&gdt_descr);
12012 /* Reload the per-cpu base */
12013@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12014 /* Filter out anything that depends on CPUID levels we don't have */
12015 filter_cpuid_features(c, true);
12016
12017+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12018+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12019+#endif
12020+
12021 /* If the model name is still unset, do table lookup. */
12022 if (!c->x86_model_id[0]) {
12023 const char *p;
12024@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12025 }
12026 __setup("clearcpuid=", setup_disablecpuid);
12027
12028+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12029+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12030+
12031 #ifdef CONFIG_X86_64
12032 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12033
12034@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12035 EXPORT_PER_CPU_SYMBOL(current_task);
12036
12037 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12038- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12039+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12040 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12041
12042 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12043@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12044 {
12045 memset(regs, 0, sizeof(struct pt_regs));
12046 regs->fs = __KERNEL_PERCPU;
12047- regs->gs = __KERNEL_STACK_CANARY;
12048+ savesegment(gs, regs->gs);
12049
12050 return regs;
12051 }
12052@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12053 int i;
12054
12055 cpu = stack_smp_processor_id();
12056- t = &per_cpu(init_tss, cpu);
12057+ t = init_tss + cpu;
12058 oist = &per_cpu(orig_ist, cpu);
12059
12060 #ifdef CONFIG_NUMA
12061@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12062 switch_to_new_gdt(cpu);
12063 loadsegment(fs, 0);
12064
12065- load_idt((const struct desc_ptr *)&idt_descr);
12066+ load_idt(&idt_descr);
12067
12068 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12069 syscall_init();
12070@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12071 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12072 barrier();
12073
12074- x86_configure_nx();
12075 if (cpu != 0)
12076 enable_x2apic();
12077
12078@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12079 {
12080 int cpu = smp_processor_id();
12081 struct task_struct *curr = current;
12082- struct tss_struct *t = &per_cpu(init_tss, cpu);
12083+ struct tss_struct *t = init_tss + cpu;
12084 struct thread_struct *thread = &curr->thread;
12085
12086 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12087diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12088index 5231312..a78a987 100644
12089--- a/arch/x86/kernel/cpu/intel.c
12090+++ b/arch/x86/kernel/cpu/intel.c
12091@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12092 * Update the IDT descriptor and reload the IDT so that
12093 * it uses the read-only mapped virtual address.
12094 */
12095- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12096+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12097 load_idt(&idt_descr);
12098 }
12099 #endif
12100diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12101index 2af127d..8ff7ac0 100644
12102--- a/arch/x86/kernel/cpu/mcheck/mce.c
12103+++ b/arch/x86/kernel/cpu/mcheck/mce.c
12104@@ -42,6 +42,7 @@
12105 #include <asm/processor.h>
12106 #include <asm/mce.h>
12107 #include <asm/msr.h>
12108+#include <asm/local.h>
12109
12110 #include "mce-internal.h"
12111
12112@@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12113 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12114 m->cs, m->ip);
12115
12116- if (m->cs == __KERNEL_CS)
12117+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12118 print_symbol("{%s}", m->ip);
12119 pr_cont("\n");
12120 }
12121@@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12122
12123 #define PANIC_TIMEOUT 5 /* 5 seconds */
12124
12125-static atomic_t mce_paniced;
12126+static atomic_unchecked_t mce_paniced;
12127
12128 static int fake_panic;
12129-static atomic_t mce_fake_paniced;
12130+static atomic_unchecked_t mce_fake_paniced;
12131
12132 /* Panic in progress. Enable interrupts and wait for final IPI */
12133 static void wait_for_panic(void)
12134@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12135 /*
12136 * Make sure only one CPU runs in machine check panic
12137 */
12138- if (atomic_inc_return(&mce_paniced) > 1)
12139+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12140 wait_for_panic();
12141 barrier();
12142
12143@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12144 console_verbose();
12145 } else {
12146 /* Don't log too much for fake panic */
12147- if (atomic_inc_return(&mce_fake_paniced) > 1)
12148+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12149 return;
12150 }
12151 /* First print corrected ones that are still unlogged */
12152@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12153 * might have been modified by someone else.
12154 */
12155 rmb();
12156- if (atomic_read(&mce_paniced))
12157+ if (atomic_read_unchecked(&mce_paniced))
12158 wait_for_panic();
12159 if (!monarch_timeout)
12160 goto out;
12161@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12162 }
12163
12164 /* Call the installed machine check handler for this CPU setup. */
12165-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12166+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12167 unexpected_machine_check;
12168
12169 /*
12170@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12171 return;
12172 }
12173
12174+ pax_open_kernel();
12175 machine_check_vector = do_machine_check;
12176+ pax_close_kernel();
12177
12178 __mcheck_cpu_init_generic();
12179 __mcheck_cpu_init_vendor(c);
12180@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12181 */
12182
12183 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12184-static int mce_chrdev_open_count; /* #times opened */
12185+static local_t mce_chrdev_open_count; /* #times opened */
12186 static int mce_chrdev_open_exclu; /* already open exclusive? */
12187
12188 static int mce_chrdev_open(struct inode *inode, struct file *file)
12189@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12190 spin_lock(&mce_chrdev_state_lock);
12191
12192 if (mce_chrdev_open_exclu ||
12193- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12194+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12195 spin_unlock(&mce_chrdev_state_lock);
12196
12197 return -EBUSY;
12198@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12199
12200 if (file->f_flags & O_EXCL)
12201 mce_chrdev_open_exclu = 1;
12202- mce_chrdev_open_count++;
12203+ local_inc(&mce_chrdev_open_count);
12204
12205 spin_unlock(&mce_chrdev_state_lock);
12206
12207@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12208 {
12209 spin_lock(&mce_chrdev_state_lock);
12210
12211- mce_chrdev_open_count--;
12212+ local_dec(&mce_chrdev_open_count);
12213 mce_chrdev_open_exclu = 0;
12214
12215 spin_unlock(&mce_chrdev_state_lock);
12216@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12217 static void mce_reset(void)
12218 {
12219 cpu_missing = 0;
12220- atomic_set(&mce_fake_paniced, 0);
12221+ atomic_set_unchecked(&mce_fake_paniced, 0);
12222 atomic_set(&mce_executing, 0);
12223 atomic_set(&mce_callin, 0);
12224 atomic_set(&global_nwo, 0);
12225diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12226index 5c0e653..1e82c7c 100644
12227--- a/arch/x86/kernel/cpu/mcheck/p5.c
12228+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12229@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12230 if (!cpu_has(c, X86_FEATURE_MCE))
12231 return;
12232
12233+ pax_open_kernel();
12234 machine_check_vector = pentium_machine_check;
12235+ pax_close_kernel();
12236 /* Make sure the vector pointer is visible before we enable MCEs: */
12237 wmb();
12238
12239diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12240index 54060f5..e6ba93d 100644
12241--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12242+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12243@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12244 {
12245 u32 lo, hi;
12246
12247+ pax_open_kernel();
12248 machine_check_vector = winchip_machine_check;
12249+ pax_close_kernel();
12250 /* Make sure the vector pointer is visible before we enable MCEs: */
12251 wmb();
12252
12253diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12254index 6b96110..0da73eb 100644
12255--- a/arch/x86/kernel/cpu/mtrr/main.c
12256+++ b/arch/x86/kernel/cpu/mtrr/main.c
12257@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12258 u64 size_or_mask, size_and_mask;
12259 static bool mtrr_aps_delayed_init;
12260
12261-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12262+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12263
12264 const struct mtrr_ops *mtrr_if;
12265
12266diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12267index df5e41f..816c719 100644
12268--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12269+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12270@@ -25,7 +25,7 @@ struct mtrr_ops {
12271 int (*validate_add_page)(unsigned long base, unsigned long size,
12272 unsigned int type);
12273 int (*have_wrcomb)(void);
12274-};
12275+} __do_const;
12276
12277 extern int generic_get_free_region(unsigned long base, unsigned long size,
12278 int replace_reg);
12279diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12280index 2bda212..78cc605 100644
12281--- a/arch/x86/kernel/cpu/perf_event.c
12282+++ b/arch/x86/kernel/cpu/perf_event.c
12283@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12284 break;
12285
12286 perf_callchain_store(entry, frame.return_address);
12287- fp = frame.next_frame;
12288+ fp = (const void __force_user *)frame.next_frame;
12289 }
12290 }
12291
12292diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12293index 13ad899..f642b9a 100644
12294--- a/arch/x86/kernel/crash.c
12295+++ b/arch/x86/kernel/crash.c
12296@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12297 {
12298 #ifdef CONFIG_X86_32
12299 struct pt_regs fixed_regs;
12300-#endif
12301
12302-#ifdef CONFIG_X86_32
12303- if (!user_mode_vm(regs)) {
12304+ if (!user_mode(regs)) {
12305 crash_fixup_ss_esp(&fixed_regs, regs);
12306 regs = &fixed_regs;
12307 }
12308diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12309index 37250fe..bf2ec74 100644
12310--- a/arch/x86/kernel/doublefault_32.c
12311+++ b/arch/x86/kernel/doublefault_32.c
12312@@ -11,7 +11,7 @@
12313
12314 #define DOUBLEFAULT_STACKSIZE (1024)
12315 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12316-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12317+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12318
12319 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12320
12321@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12322 unsigned long gdt, tss;
12323
12324 store_gdt(&gdt_desc);
12325- gdt = gdt_desc.address;
12326+ gdt = (unsigned long)gdt_desc.address;
12327
12328 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12329
12330@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12331 /* 0x2 bit is always set */
12332 .flags = X86_EFLAGS_SF | 0x2,
12333 .sp = STACK_START,
12334- .es = __USER_DS,
12335+ .es = __KERNEL_DS,
12336 .cs = __KERNEL_CS,
12337 .ss = __KERNEL_DS,
12338- .ds = __USER_DS,
12339+ .ds = __KERNEL_DS,
12340 .fs = __KERNEL_PERCPU,
12341
12342 .__cr3 = __pa_nodebug(swapper_pg_dir),
12343diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12344index 1aae78f..aab3a3d 100644
12345--- a/arch/x86/kernel/dumpstack.c
12346+++ b/arch/x86/kernel/dumpstack.c
12347@@ -2,6 +2,9 @@
12348 * Copyright (C) 1991, 1992 Linus Torvalds
12349 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12350 */
12351+#ifdef CONFIG_GRKERNSEC_HIDESYM
12352+#define __INCLUDED_BY_HIDESYM 1
12353+#endif
12354 #include <linux/kallsyms.h>
12355 #include <linux/kprobes.h>
12356 #include <linux/uaccess.h>
12357@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12358 static void
12359 print_ftrace_graph_addr(unsigned long addr, void *data,
12360 const struct stacktrace_ops *ops,
12361- struct thread_info *tinfo, int *graph)
12362+ struct task_struct *task, int *graph)
12363 {
12364- struct task_struct *task = tinfo->task;
12365 unsigned long ret_addr;
12366 int index = task->curr_ret_stack;
12367
12368@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12369 static inline void
12370 print_ftrace_graph_addr(unsigned long addr, void *data,
12371 const struct stacktrace_ops *ops,
12372- struct thread_info *tinfo, int *graph)
12373+ struct task_struct *task, int *graph)
12374 { }
12375 #endif
12376
12377@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12378 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12379 */
12380
12381-static inline int valid_stack_ptr(struct thread_info *tinfo,
12382- void *p, unsigned int size, void *end)
12383+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12384 {
12385- void *t = tinfo;
12386 if (end) {
12387 if (p < end && p >= (end-THREAD_SIZE))
12388 return 1;
12389@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12390 }
12391
12392 unsigned long
12393-print_context_stack(struct thread_info *tinfo,
12394+print_context_stack(struct task_struct *task, void *stack_start,
12395 unsigned long *stack, unsigned long bp,
12396 const struct stacktrace_ops *ops, void *data,
12397 unsigned long *end, int *graph)
12398 {
12399 struct stack_frame *frame = (struct stack_frame *)bp;
12400
12401- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12402+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12403 unsigned long addr;
12404
12405 addr = *stack;
12406@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12407 } else {
12408 ops->address(data, addr, 0);
12409 }
12410- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12411+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12412 }
12413 stack++;
12414 }
12415@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12416 EXPORT_SYMBOL_GPL(print_context_stack);
12417
12418 unsigned long
12419-print_context_stack_bp(struct thread_info *tinfo,
12420+print_context_stack_bp(struct task_struct *task, void *stack_start,
12421 unsigned long *stack, unsigned long bp,
12422 const struct stacktrace_ops *ops, void *data,
12423 unsigned long *end, int *graph)
12424@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12425 struct stack_frame *frame = (struct stack_frame *)bp;
12426 unsigned long *ret_addr = &frame->return_address;
12427
12428- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12429+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12430 unsigned long addr = *ret_addr;
12431
12432 if (!__kernel_text_address(addr))
12433@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12434 ops->address(data, addr, 1);
12435 frame = frame->next_frame;
12436 ret_addr = &frame->return_address;
12437- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12438+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12439 }
12440
12441 return (unsigned long)frame;
12442@@ -186,7 +186,7 @@ void dump_stack(void)
12443
12444 bp = stack_frame(current, NULL);
12445 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12446- current->pid, current->comm, print_tainted(),
12447+ task_pid_nr(current), current->comm, print_tainted(),
12448 init_utsname()->release,
12449 (int)strcspn(init_utsname()->version, " "),
12450 init_utsname()->version);
12451@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12452 }
12453 EXPORT_SYMBOL_GPL(oops_begin);
12454
12455+extern void gr_handle_kernel_exploit(void);
12456+
12457 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12458 {
12459 if (regs && kexec_should_crash(current))
12460@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12461 panic("Fatal exception in interrupt");
12462 if (panic_on_oops)
12463 panic("Fatal exception");
12464- do_exit(signr);
12465+
12466+ gr_handle_kernel_exploit();
12467+
12468+ do_group_exit(signr);
12469 }
12470
12471 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12472@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12473
12474 show_registers(regs);
12475 #ifdef CONFIG_X86_32
12476- if (user_mode_vm(regs)) {
12477+ if (user_mode(regs)) {
12478 sp = regs->sp;
12479 ss = regs->ss & 0xffff;
12480 } else {
12481@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12482 unsigned long flags = oops_begin();
12483 int sig = SIGSEGV;
12484
12485- if (!user_mode_vm(regs))
12486+ if (!user_mode(regs))
12487 report_bug(regs->ip, regs);
12488
12489 if (__die(str, regs, err))
12490diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12491index c99f9ed..2a15d80 100644
12492--- a/arch/x86/kernel/dumpstack_32.c
12493+++ b/arch/x86/kernel/dumpstack_32.c
12494@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12495 bp = stack_frame(task, regs);
12496
12497 for (;;) {
12498- struct thread_info *context;
12499+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12500
12501- context = (struct thread_info *)
12502- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12503- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12504+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12505
12506- stack = (unsigned long *)context->previous_esp;
12507- if (!stack)
12508+ if (stack_start == task_stack_page(task))
12509 break;
12510+ stack = *(unsigned long **)stack_start;
12511 if (ops->stack(data, "IRQ") < 0)
12512 break;
12513 touch_nmi_watchdog();
12514@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12515 * When in-kernel, we also print out the stack and code at the
12516 * time of the fault..
12517 */
12518- if (!user_mode_vm(regs)) {
12519+ if (!user_mode(regs)) {
12520 unsigned int code_prologue = code_bytes * 43 / 64;
12521 unsigned int code_len = code_bytes;
12522 unsigned char c;
12523 u8 *ip;
12524+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12525
12526 printk(KERN_EMERG "Stack:\n");
12527 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12528
12529 printk(KERN_EMERG "Code: ");
12530
12531- ip = (u8 *)regs->ip - code_prologue;
12532+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12533 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12534 /* try starting at IP */
12535- ip = (u8 *)regs->ip;
12536+ ip = (u8 *)regs->ip + cs_base;
12537 code_len = code_len - code_prologue + 1;
12538 }
12539 for (i = 0; i < code_len; i++, ip++) {
12540@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12541 printk(KERN_CONT " Bad EIP value.");
12542 break;
12543 }
12544- if (ip == (u8 *)regs->ip)
12545+ if (ip == (u8 *)regs->ip + cs_base)
12546 printk(KERN_CONT "<%02x> ", c);
12547 else
12548 printk(KERN_CONT "%02x ", c);
12549@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12550 {
12551 unsigned short ud2;
12552
12553+ ip = ktla_ktva(ip);
12554 if (ip < PAGE_OFFSET)
12555 return 0;
12556 if (probe_kernel_address((unsigned short *)ip, ud2))
12557@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12558
12559 return ud2 == 0x0b0f;
12560 }
12561+
12562+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12563+void pax_check_alloca(unsigned long size)
12564+{
12565+ unsigned long sp = (unsigned long)&sp, stack_left;
12566+
12567+ /* all kernel stacks are of the same size */
12568+ stack_left = sp & (THREAD_SIZE - 1);
12569+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12570+}
12571+EXPORT_SYMBOL(pax_check_alloca);
12572+#endif
12573diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12574index 6d728d9..279514e 100644
12575--- a/arch/x86/kernel/dumpstack_64.c
12576+++ b/arch/x86/kernel/dumpstack_64.c
12577@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12578 unsigned long *irq_stack_end =
12579 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12580 unsigned used = 0;
12581- struct thread_info *tinfo;
12582 int graph = 0;
12583 unsigned long dummy;
12584+ void *stack_start;
12585
12586 if (!task)
12587 task = current;
12588@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12589 * current stack address. If the stacks consist of nested
12590 * exceptions
12591 */
12592- tinfo = task_thread_info(task);
12593 for (;;) {
12594 char *id;
12595 unsigned long *estack_end;
12596+
12597 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12598 &used, &id);
12599
12600@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12601 if (ops->stack(data, id) < 0)
12602 break;
12603
12604- bp = ops->walk_stack(tinfo, stack, bp, ops,
12605+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12606 data, estack_end, &graph);
12607 ops->stack(data, "<EOE>");
12608 /*
12609@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12610 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12611 if (ops->stack(data, "IRQ") < 0)
12612 break;
12613- bp = ops->walk_stack(tinfo, stack, bp,
12614+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12615 ops, data, irq_stack_end, &graph);
12616 /*
12617 * We link to the next stack (which would be
12618@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12619 /*
12620 * This handles the process stack:
12621 */
12622- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12623+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12624+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12625 put_cpu();
12626 }
12627 EXPORT_SYMBOL(dump_trace);
12628@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12629
12630 return ud2 == 0x0b0f;
12631 }
12632+
12633+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12634+void pax_check_alloca(unsigned long size)
12635+{
12636+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12637+ unsigned cpu, used;
12638+ char *id;
12639+
12640+ /* check the process stack first */
12641+ stack_start = (unsigned long)task_stack_page(current);
12642+ stack_end = stack_start + THREAD_SIZE;
12643+ if (likely(stack_start <= sp && sp < stack_end)) {
12644+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12645+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12646+ return;
12647+ }
12648+
12649+ cpu = get_cpu();
12650+
12651+ /* check the irq stacks */
12652+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12653+ stack_start = stack_end - IRQ_STACK_SIZE;
12654+ if (stack_start <= sp && sp < stack_end) {
12655+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12656+ put_cpu();
12657+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12658+ return;
12659+ }
12660+
12661+ /* check the exception stacks */
12662+ used = 0;
12663+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12664+ stack_start = stack_end - EXCEPTION_STKSZ;
12665+ if (stack_end && stack_start <= sp && sp < stack_end) {
12666+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12667+ put_cpu();
12668+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12669+ return;
12670+ }
12671+
12672+ put_cpu();
12673+
12674+ /* unknown stack */
12675+ BUG();
12676+}
12677+EXPORT_SYMBOL(pax_check_alloca);
12678+#endif
12679diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12680index cd28a35..c72ed9a 100644
12681--- a/arch/x86/kernel/early_printk.c
12682+++ b/arch/x86/kernel/early_printk.c
12683@@ -7,6 +7,7 @@
12684 #include <linux/pci_regs.h>
12685 #include <linux/pci_ids.h>
12686 #include <linux/errno.h>
12687+#include <linux/sched.h>
12688 #include <asm/io.h>
12689 #include <asm/processor.h>
12690 #include <asm/fcntl.h>
12691diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12692index f3f6f53..0841b66 100644
12693--- a/arch/x86/kernel/entry_32.S
12694+++ b/arch/x86/kernel/entry_32.S
12695@@ -186,13 +186,146 @@
12696 /*CFI_REL_OFFSET gs, PT_GS*/
12697 .endm
12698 .macro SET_KERNEL_GS reg
12699+
12700+#ifdef CONFIG_CC_STACKPROTECTOR
12701 movl $(__KERNEL_STACK_CANARY), \reg
12702+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12703+ movl $(__USER_DS), \reg
12704+#else
12705+ xorl \reg, \reg
12706+#endif
12707+
12708 movl \reg, %gs
12709 .endm
12710
12711 #endif /* CONFIG_X86_32_LAZY_GS */
12712
12713-.macro SAVE_ALL
12714+.macro pax_enter_kernel
12715+#ifdef CONFIG_PAX_KERNEXEC
12716+ call pax_enter_kernel
12717+#endif
12718+.endm
12719+
12720+.macro pax_exit_kernel
12721+#ifdef CONFIG_PAX_KERNEXEC
12722+ call pax_exit_kernel
12723+#endif
12724+.endm
12725+
12726+#ifdef CONFIG_PAX_KERNEXEC
12727+ENTRY(pax_enter_kernel)
12728+#ifdef CONFIG_PARAVIRT
12729+ pushl %eax
12730+ pushl %ecx
12731+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12732+ mov %eax, %esi
12733+#else
12734+ mov %cr0, %esi
12735+#endif
12736+ bts $16, %esi
12737+ jnc 1f
12738+ mov %cs, %esi
12739+ cmp $__KERNEL_CS, %esi
12740+ jz 3f
12741+ ljmp $__KERNEL_CS, $3f
12742+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12743+2:
12744+#ifdef CONFIG_PARAVIRT
12745+ mov %esi, %eax
12746+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12747+#else
12748+ mov %esi, %cr0
12749+#endif
12750+3:
12751+#ifdef CONFIG_PARAVIRT
12752+ popl %ecx
12753+ popl %eax
12754+#endif
12755+ ret
12756+ENDPROC(pax_enter_kernel)
12757+
12758+ENTRY(pax_exit_kernel)
12759+#ifdef CONFIG_PARAVIRT
12760+ pushl %eax
12761+ pushl %ecx
12762+#endif
12763+ mov %cs, %esi
12764+ cmp $__KERNEXEC_KERNEL_CS, %esi
12765+ jnz 2f
12766+#ifdef CONFIG_PARAVIRT
12767+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12768+ mov %eax, %esi
12769+#else
12770+ mov %cr0, %esi
12771+#endif
12772+ btr $16, %esi
12773+ ljmp $__KERNEL_CS, $1f
12774+1:
12775+#ifdef CONFIG_PARAVIRT
12776+ mov %esi, %eax
12777+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12778+#else
12779+ mov %esi, %cr0
12780+#endif
12781+2:
12782+#ifdef CONFIG_PARAVIRT
12783+ popl %ecx
12784+ popl %eax
12785+#endif
12786+ ret
12787+ENDPROC(pax_exit_kernel)
12788+#endif
12789+
12790+.macro pax_erase_kstack
12791+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12792+ call pax_erase_kstack
12793+#endif
12794+.endm
12795+
12796+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12797+/*
12798+ * ebp: thread_info
12799+ * ecx, edx: can be clobbered
12800+ */
12801+ENTRY(pax_erase_kstack)
12802+ pushl %edi
12803+ pushl %eax
12804+
12805+ mov TI_lowest_stack(%ebp), %edi
12806+ mov $-0xBEEF, %eax
12807+ std
12808+
12809+1: mov %edi, %ecx
12810+ and $THREAD_SIZE_asm - 1, %ecx
12811+ shr $2, %ecx
12812+ repne scasl
12813+ jecxz 2f
12814+
12815+ cmp $2*16, %ecx
12816+ jc 2f
12817+
12818+ mov $2*16, %ecx
12819+ repe scasl
12820+ jecxz 2f
12821+ jne 1b
12822+
12823+2: cld
12824+ mov %esp, %ecx
12825+ sub %edi, %ecx
12826+ shr $2, %ecx
12827+ rep stosl
12828+
12829+ mov TI_task_thread_sp0(%ebp), %edi
12830+ sub $128, %edi
12831+ mov %edi, TI_lowest_stack(%ebp)
12832+
12833+ popl %eax
12834+ popl %edi
12835+ ret
12836+ENDPROC(pax_erase_kstack)
12837+#endif
12838+
12839+.macro __SAVE_ALL _DS
12840 cld
12841 PUSH_GS
12842 pushl_cfi %fs
12843@@ -215,7 +348,7 @@
12844 CFI_REL_OFFSET ecx, 0
12845 pushl_cfi %ebx
12846 CFI_REL_OFFSET ebx, 0
12847- movl $(__USER_DS), %edx
12848+ movl $\_DS, %edx
12849 movl %edx, %ds
12850 movl %edx, %es
12851 movl $(__KERNEL_PERCPU), %edx
12852@@ -223,6 +356,15 @@
12853 SET_KERNEL_GS %edx
12854 .endm
12855
12856+.macro SAVE_ALL
12857+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12858+ __SAVE_ALL __KERNEL_DS
12859+ pax_enter_kernel
12860+#else
12861+ __SAVE_ALL __USER_DS
12862+#endif
12863+.endm
12864+
12865 .macro RESTORE_INT_REGS
12866 popl_cfi %ebx
12867 CFI_RESTORE ebx
12868@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12869 popfl_cfi
12870 jmp syscall_exit
12871 CFI_ENDPROC
12872-END(ret_from_fork)
12873+ENDPROC(ret_from_fork)
12874
12875 /*
12876 * Interrupt exit functions should be protected against kprobes
12877@@ -333,7 +475,15 @@ check_userspace:
12878 movb PT_CS(%esp), %al
12879 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12880 cmpl $USER_RPL, %eax
12881+
12882+#ifdef CONFIG_PAX_KERNEXEC
12883+ jae resume_userspace
12884+
12885+ PAX_EXIT_KERNEL
12886+ jmp resume_kernel
12887+#else
12888 jb resume_kernel # not returning to v8086 or userspace
12889+#endif
12890
12891 ENTRY(resume_userspace)
12892 LOCKDEP_SYS_EXIT
12893@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12894 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12895 # int/exception return?
12896 jne work_pending
12897- jmp restore_all
12898-END(ret_from_exception)
12899+ jmp restore_all_pax
12900+ENDPROC(ret_from_exception)
12901
12902 #ifdef CONFIG_PREEMPT
12903 ENTRY(resume_kernel)
12904@@ -361,7 +511,7 @@ need_resched:
12905 jz restore_all
12906 call preempt_schedule_irq
12907 jmp need_resched
12908-END(resume_kernel)
12909+ENDPROC(resume_kernel)
12910 #endif
12911 CFI_ENDPROC
12912 /*
12913@@ -395,23 +545,34 @@ sysenter_past_esp:
12914 /*CFI_REL_OFFSET cs, 0*/
12915 /*
12916 * Push current_thread_info()->sysenter_return to the stack.
12917- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12918- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12919 */
12920- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12921+ pushl_cfi $0
12922 CFI_REL_OFFSET eip, 0
12923
12924 pushl_cfi %eax
12925 SAVE_ALL
12926+ GET_THREAD_INFO(%ebp)
12927+ movl TI_sysenter_return(%ebp),%ebp
12928+ movl %ebp,PT_EIP(%esp)
12929 ENABLE_INTERRUPTS(CLBR_NONE)
12930
12931 /*
12932 * Load the potential sixth argument from user stack.
12933 * Careful about security.
12934 */
12935+ movl PT_OLDESP(%esp),%ebp
12936+
12937+#ifdef CONFIG_PAX_MEMORY_UDEREF
12938+ mov PT_OLDSS(%esp),%ds
12939+1: movl %ds:(%ebp),%ebp
12940+ push %ss
12941+ pop %ds
12942+#else
12943 cmpl $__PAGE_OFFSET-3,%ebp
12944 jae syscall_fault
12945 1: movl (%ebp),%ebp
12946+#endif
12947+
12948 movl %ebp,PT_EBP(%esp)
12949 .section __ex_table,"a"
12950 .align 4
12951@@ -434,12 +595,24 @@ sysenter_do_call:
12952 testl $_TIF_ALLWORK_MASK, %ecx
12953 jne sysexit_audit
12954 sysenter_exit:
12955+
12956+#ifdef CONFIG_PAX_RANDKSTACK
12957+ pushl_cfi %eax
12958+ movl %esp, %eax
12959+ call pax_randomize_kstack
12960+ popl_cfi %eax
12961+#endif
12962+
12963+ pax_erase_kstack
12964+
12965 /* if something modifies registers it must also disable sysexit */
12966 movl PT_EIP(%esp), %edx
12967 movl PT_OLDESP(%esp), %ecx
12968 xorl %ebp,%ebp
12969 TRACE_IRQS_ON
12970 1: mov PT_FS(%esp), %fs
12971+2: mov PT_DS(%esp), %ds
12972+3: mov PT_ES(%esp), %es
12973 PTGS_TO_GS
12974 ENABLE_INTERRUPTS_SYSEXIT
12975
12976@@ -456,6 +629,9 @@ sysenter_audit:
12977 movl %eax,%edx /* 2nd arg: syscall number */
12978 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12979 call audit_syscall_entry
12980+
12981+ pax_erase_kstack
12982+
12983 pushl_cfi %ebx
12984 movl PT_EAX(%esp),%eax /* reload syscall number */
12985 jmp sysenter_do_call
12986@@ -482,11 +658,17 @@ sysexit_audit:
12987
12988 CFI_ENDPROC
12989 .pushsection .fixup,"ax"
12990-2: movl $0,PT_FS(%esp)
12991+4: movl $0,PT_FS(%esp)
12992+ jmp 1b
12993+5: movl $0,PT_DS(%esp)
12994+ jmp 1b
12995+6: movl $0,PT_ES(%esp)
12996 jmp 1b
12997 .section __ex_table,"a"
12998 .align 4
12999- .long 1b,2b
13000+ .long 1b,4b
13001+ .long 2b,5b
13002+ .long 3b,6b
13003 .popsection
13004 PTGS_TO_GS_EX
13005 ENDPROC(ia32_sysenter_target)
13006@@ -519,6 +701,15 @@ syscall_exit:
13007 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13008 jne syscall_exit_work
13009
13010+restore_all_pax:
13011+
13012+#ifdef CONFIG_PAX_RANDKSTACK
13013+ movl %esp, %eax
13014+ call pax_randomize_kstack
13015+#endif
13016+
13017+ pax_erase_kstack
13018+
13019 restore_all:
13020 TRACE_IRQS_IRET
13021 restore_all_notrace:
13022@@ -578,14 +769,34 @@ ldt_ss:
13023 * compensating for the offset by changing to the ESPFIX segment with
13024 * a base address that matches for the difference.
13025 */
13026-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13027+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13028 mov %esp, %edx /* load kernel esp */
13029 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13030 mov %dx, %ax /* eax: new kernel esp */
13031 sub %eax, %edx /* offset (low word is 0) */
13032+#ifdef CONFIG_SMP
13033+ movl PER_CPU_VAR(cpu_number), %ebx
13034+ shll $PAGE_SHIFT_asm, %ebx
13035+ addl $cpu_gdt_table, %ebx
13036+#else
13037+ movl $cpu_gdt_table, %ebx
13038+#endif
13039 shr $16, %edx
13040- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13041- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13042+
13043+#ifdef CONFIG_PAX_KERNEXEC
13044+ mov %cr0, %esi
13045+ btr $16, %esi
13046+ mov %esi, %cr0
13047+#endif
13048+
13049+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13050+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13051+
13052+#ifdef CONFIG_PAX_KERNEXEC
13053+ bts $16, %esi
13054+ mov %esi, %cr0
13055+#endif
13056+
13057 pushl_cfi $__ESPFIX_SS
13058 pushl_cfi %eax /* new kernel esp */
13059 /* Disable interrupts, but do not irqtrace this section: we
13060@@ -614,34 +825,28 @@ work_resched:
13061 movl TI_flags(%ebp), %ecx
13062 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13063 # than syscall tracing?
13064- jz restore_all
13065+ jz restore_all_pax
13066 testb $_TIF_NEED_RESCHED, %cl
13067 jnz work_resched
13068
13069 work_notifysig: # deal with pending signals and
13070 # notify-resume requests
13071+ movl %esp, %eax
13072 #ifdef CONFIG_VM86
13073 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13074- movl %esp, %eax
13075- jne work_notifysig_v86 # returning to kernel-space or
13076+ jz 1f # returning to kernel-space or
13077 # vm86-space
13078- xorl %edx, %edx
13079- call do_notify_resume
13080- jmp resume_userspace_sig
13081
13082- ALIGN
13083-work_notifysig_v86:
13084 pushl_cfi %ecx # save ti_flags for do_notify_resume
13085 call save_v86_state # %eax contains pt_regs pointer
13086 popl_cfi %ecx
13087 movl %eax, %esp
13088-#else
13089- movl %esp, %eax
13090+1:
13091 #endif
13092 xorl %edx, %edx
13093 call do_notify_resume
13094 jmp resume_userspace_sig
13095-END(work_pending)
13096+ENDPROC(work_pending)
13097
13098 # perform syscall exit tracing
13099 ALIGN
13100@@ -649,11 +854,14 @@ syscall_trace_entry:
13101 movl $-ENOSYS,PT_EAX(%esp)
13102 movl %esp, %eax
13103 call syscall_trace_enter
13104+
13105+ pax_erase_kstack
13106+
13107 /* What it returned is what we'll actually use. */
13108 cmpl $(nr_syscalls), %eax
13109 jnae syscall_call
13110 jmp syscall_exit
13111-END(syscall_trace_entry)
13112+ENDPROC(syscall_trace_entry)
13113
13114 # perform syscall exit tracing
13115 ALIGN
13116@@ -666,20 +874,24 @@ syscall_exit_work:
13117 movl %esp, %eax
13118 call syscall_trace_leave
13119 jmp resume_userspace
13120-END(syscall_exit_work)
13121+ENDPROC(syscall_exit_work)
13122 CFI_ENDPROC
13123
13124 RING0_INT_FRAME # can't unwind into user space anyway
13125 syscall_fault:
13126+#ifdef CONFIG_PAX_MEMORY_UDEREF
13127+ push %ss
13128+ pop %ds
13129+#endif
13130 GET_THREAD_INFO(%ebp)
13131 movl $-EFAULT,PT_EAX(%esp)
13132 jmp resume_userspace
13133-END(syscall_fault)
13134+ENDPROC(syscall_fault)
13135
13136 syscall_badsys:
13137 movl $-ENOSYS,PT_EAX(%esp)
13138 jmp resume_userspace
13139-END(syscall_badsys)
13140+ENDPROC(syscall_badsys)
13141 CFI_ENDPROC
13142 /*
13143 * End of kprobes section
13144@@ -753,6 +965,36 @@ ptregs_clone:
13145 CFI_ENDPROC
13146 ENDPROC(ptregs_clone)
13147
13148+ ALIGN;
13149+ENTRY(kernel_execve)
13150+ CFI_STARTPROC
13151+ pushl_cfi %ebp
13152+ sub $PT_OLDSS+4,%esp
13153+ pushl_cfi %edi
13154+ pushl_cfi %ecx
13155+ pushl_cfi %eax
13156+ lea 3*4(%esp),%edi
13157+ mov $PT_OLDSS/4+1,%ecx
13158+ xorl %eax,%eax
13159+ rep stosl
13160+ popl_cfi %eax
13161+ popl_cfi %ecx
13162+ popl_cfi %edi
13163+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13164+ pushl_cfi %esp
13165+ call sys_execve
13166+ add $4,%esp
13167+ CFI_ADJUST_CFA_OFFSET -4
13168+ GET_THREAD_INFO(%ebp)
13169+ test %eax,%eax
13170+ jz syscall_exit
13171+ add $PT_OLDSS+4,%esp
13172+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13173+ popl_cfi %ebp
13174+ ret
13175+ CFI_ENDPROC
13176+ENDPROC(kernel_execve)
13177+
13178 .macro FIXUP_ESPFIX_STACK
13179 /*
13180 * Switch back for ESPFIX stack to the normal zerobased stack
13181@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13182 * normal stack and adjusts ESP with the matching offset.
13183 */
13184 /* fixup the stack */
13185- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13186- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13187+#ifdef CONFIG_SMP
13188+ movl PER_CPU_VAR(cpu_number), %ebx
13189+ shll $PAGE_SHIFT_asm, %ebx
13190+ addl $cpu_gdt_table, %ebx
13191+#else
13192+ movl $cpu_gdt_table, %ebx
13193+#endif
13194+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13195+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13196 shl $16, %eax
13197 addl %esp, %eax /* the adjusted stack pointer */
13198 pushl_cfi $__KERNEL_DS
13199@@ -816,7 +1065,7 @@ vector=vector+1
13200 .endr
13201 2: jmp common_interrupt
13202 .endr
13203-END(irq_entries_start)
13204+ENDPROC(irq_entries_start)
13205
13206 .previous
13207 END(interrupt)
13208@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13209 pushl_cfi $do_coprocessor_error
13210 jmp error_code
13211 CFI_ENDPROC
13212-END(coprocessor_error)
13213+ENDPROC(coprocessor_error)
13214
13215 ENTRY(simd_coprocessor_error)
13216 RING0_INT_FRAME
13217@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13218 #endif
13219 jmp error_code
13220 CFI_ENDPROC
13221-END(simd_coprocessor_error)
13222+ENDPROC(simd_coprocessor_error)
13223
13224 ENTRY(device_not_available)
13225 RING0_INT_FRAME
13226@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13227 pushl_cfi $do_device_not_available
13228 jmp error_code
13229 CFI_ENDPROC
13230-END(device_not_available)
13231+ENDPROC(device_not_available)
13232
13233 #ifdef CONFIG_PARAVIRT
13234 ENTRY(native_iret)
13235@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13236 .align 4
13237 .long native_iret, iret_exc
13238 .previous
13239-END(native_iret)
13240+ENDPROC(native_iret)
13241
13242 ENTRY(native_irq_enable_sysexit)
13243 sti
13244 sysexit
13245-END(native_irq_enable_sysexit)
13246+ENDPROC(native_irq_enable_sysexit)
13247 #endif
13248
13249 ENTRY(overflow)
13250@@ -916,7 +1165,7 @@ ENTRY(overflow)
13251 pushl_cfi $do_overflow
13252 jmp error_code
13253 CFI_ENDPROC
13254-END(overflow)
13255+ENDPROC(overflow)
13256
13257 ENTRY(bounds)
13258 RING0_INT_FRAME
13259@@ -924,7 +1173,7 @@ ENTRY(bounds)
13260 pushl_cfi $do_bounds
13261 jmp error_code
13262 CFI_ENDPROC
13263-END(bounds)
13264+ENDPROC(bounds)
13265
13266 ENTRY(invalid_op)
13267 RING0_INT_FRAME
13268@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13269 pushl_cfi $do_invalid_op
13270 jmp error_code
13271 CFI_ENDPROC
13272-END(invalid_op)
13273+ENDPROC(invalid_op)
13274
13275 ENTRY(coprocessor_segment_overrun)
13276 RING0_INT_FRAME
13277@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13278 pushl_cfi $do_coprocessor_segment_overrun
13279 jmp error_code
13280 CFI_ENDPROC
13281-END(coprocessor_segment_overrun)
13282+ENDPROC(coprocessor_segment_overrun)
13283
13284 ENTRY(invalid_TSS)
13285 RING0_EC_FRAME
13286 pushl_cfi $do_invalid_TSS
13287 jmp error_code
13288 CFI_ENDPROC
13289-END(invalid_TSS)
13290+ENDPROC(invalid_TSS)
13291
13292 ENTRY(segment_not_present)
13293 RING0_EC_FRAME
13294 pushl_cfi $do_segment_not_present
13295 jmp error_code
13296 CFI_ENDPROC
13297-END(segment_not_present)
13298+ENDPROC(segment_not_present)
13299
13300 ENTRY(stack_segment)
13301 RING0_EC_FRAME
13302 pushl_cfi $do_stack_segment
13303 jmp error_code
13304 CFI_ENDPROC
13305-END(stack_segment)
13306+ENDPROC(stack_segment)
13307
13308 ENTRY(alignment_check)
13309 RING0_EC_FRAME
13310 pushl_cfi $do_alignment_check
13311 jmp error_code
13312 CFI_ENDPROC
13313-END(alignment_check)
13314+ENDPROC(alignment_check)
13315
13316 ENTRY(divide_error)
13317 RING0_INT_FRAME
13318@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13319 pushl_cfi $do_divide_error
13320 jmp error_code
13321 CFI_ENDPROC
13322-END(divide_error)
13323+ENDPROC(divide_error)
13324
13325 #ifdef CONFIG_X86_MCE
13326 ENTRY(machine_check)
13327@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13328 pushl_cfi machine_check_vector
13329 jmp error_code
13330 CFI_ENDPROC
13331-END(machine_check)
13332+ENDPROC(machine_check)
13333 #endif
13334
13335 ENTRY(spurious_interrupt_bug)
13336@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13337 pushl_cfi $do_spurious_interrupt_bug
13338 jmp error_code
13339 CFI_ENDPROC
13340-END(spurious_interrupt_bug)
13341+ENDPROC(spurious_interrupt_bug)
13342 /*
13343 * End of kprobes section
13344 */
13345@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13346
13347 ENTRY(mcount)
13348 ret
13349-END(mcount)
13350+ENDPROC(mcount)
13351
13352 ENTRY(ftrace_caller)
13353 cmpl $0, function_trace_stop
13354@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13355 .globl ftrace_stub
13356 ftrace_stub:
13357 ret
13358-END(ftrace_caller)
13359+ENDPROC(ftrace_caller)
13360
13361 #else /* ! CONFIG_DYNAMIC_FTRACE */
13362
13363@@ -1174,7 +1423,7 @@ trace:
13364 popl %ecx
13365 popl %eax
13366 jmp ftrace_stub
13367-END(mcount)
13368+ENDPROC(mcount)
13369 #endif /* CONFIG_DYNAMIC_FTRACE */
13370 #endif /* CONFIG_FUNCTION_TRACER */
13371
13372@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13373 popl %ecx
13374 popl %eax
13375 ret
13376-END(ftrace_graph_caller)
13377+ENDPROC(ftrace_graph_caller)
13378
13379 .globl return_to_handler
13380 return_to_handler:
13381@@ -1209,7 +1458,6 @@ return_to_handler:
13382 jmp *%ecx
13383 #endif
13384
13385-.section .rodata,"a"
13386 #include "syscall_table_32.S"
13387
13388 syscall_table_size=(.-sys_call_table)
13389@@ -1255,15 +1503,18 @@ error_code:
13390 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13391 REG_TO_PTGS %ecx
13392 SET_KERNEL_GS %ecx
13393- movl $(__USER_DS), %ecx
13394+ movl $(__KERNEL_DS), %ecx
13395 movl %ecx, %ds
13396 movl %ecx, %es
13397+
13398+ pax_enter_kernel
13399+
13400 TRACE_IRQS_OFF
13401 movl %esp,%eax # pt_regs pointer
13402 call *%edi
13403 jmp ret_from_exception
13404 CFI_ENDPROC
13405-END(page_fault)
13406+ENDPROC(page_fault)
13407
13408 /*
13409 * Debug traps and NMI can happen at the one SYSENTER instruction
13410@@ -1305,7 +1556,7 @@ debug_stack_correct:
13411 call do_debug
13412 jmp ret_from_exception
13413 CFI_ENDPROC
13414-END(debug)
13415+ENDPROC(debug)
13416
13417 /*
13418 * NMI is doubly nasty. It can happen _while_ we're handling
13419@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13420 xorl %edx,%edx # zero error code
13421 movl %esp,%eax # pt_regs pointer
13422 call do_nmi
13423+
13424+ pax_exit_kernel
13425+
13426 jmp restore_all_notrace
13427 CFI_ENDPROC
13428
13429@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13430 FIXUP_ESPFIX_STACK # %eax == %esp
13431 xorl %edx,%edx # zero error code
13432 call do_nmi
13433+
13434+ pax_exit_kernel
13435+
13436 RESTORE_REGS
13437 lss 12+4(%esp), %esp # back to espfix stack
13438 CFI_ADJUST_CFA_OFFSET -24
13439 jmp irq_return
13440 CFI_ENDPROC
13441-END(nmi)
13442+ENDPROC(nmi)
13443
13444 ENTRY(int3)
13445 RING0_INT_FRAME
13446@@ -1395,14 +1652,14 @@ ENTRY(int3)
13447 call do_int3
13448 jmp ret_from_exception
13449 CFI_ENDPROC
13450-END(int3)
13451+ENDPROC(int3)
13452
13453 ENTRY(general_protection)
13454 RING0_EC_FRAME
13455 pushl_cfi $do_general_protection
13456 jmp error_code
13457 CFI_ENDPROC
13458-END(general_protection)
13459+ENDPROC(general_protection)
13460
13461 #ifdef CONFIG_KVM_GUEST
13462 ENTRY(async_page_fault)
13463@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13464 pushl_cfi $do_async_page_fault
13465 jmp error_code
13466 CFI_ENDPROC
13467-END(async_page_fault)
13468+ENDPROC(async_page_fault)
13469 #endif
13470
13471 /*
13472diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13473index faf8d5e..f58c441 100644
13474--- a/arch/x86/kernel/entry_64.S
13475+++ b/arch/x86/kernel/entry_64.S
13476@@ -55,6 +55,8 @@
13477 #include <asm/paravirt.h>
13478 #include <asm/ftrace.h>
13479 #include <asm/percpu.h>
13480+#include <asm/pgtable.h>
13481+#include <asm/alternative-asm.h>
13482
13483 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13484 #include <linux/elf-em.h>
13485@@ -68,8 +70,9 @@
13486 #ifdef CONFIG_FUNCTION_TRACER
13487 #ifdef CONFIG_DYNAMIC_FTRACE
13488 ENTRY(mcount)
13489+ pax_force_retaddr
13490 retq
13491-END(mcount)
13492+ENDPROC(mcount)
13493
13494 ENTRY(ftrace_caller)
13495 cmpl $0, function_trace_stop
13496@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13497 #endif
13498
13499 GLOBAL(ftrace_stub)
13500+ pax_force_retaddr
13501 retq
13502-END(ftrace_caller)
13503+ENDPROC(ftrace_caller)
13504
13505 #else /* ! CONFIG_DYNAMIC_FTRACE */
13506 ENTRY(mcount)
13507@@ -112,6 +116,7 @@ ENTRY(mcount)
13508 #endif
13509
13510 GLOBAL(ftrace_stub)
13511+ pax_force_retaddr
13512 retq
13513
13514 trace:
13515@@ -121,12 +126,13 @@ trace:
13516 movq 8(%rbp), %rsi
13517 subq $MCOUNT_INSN_SIZE, %rdi
13518
13519+ pax_force_fptr ftrace_trace_function
13520 call *ftrace_trace_function
13521
13522 MCOUNT_RESTORE_FRAME
13523
13524 jmp ftrace_stub
13525-END(mcount)
13526+ENDPROC(mcount)
13527 #endif /* CONFIG_DYNAMIC_FTRACE */
13528 #endif /* CONFIG_FUNCTION_TRACER */
13529
13530@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13531
13532 MCOUNT_RESTORE_FRAME
13533
13534+ pax_force_retaddr
13535 retq
13536-END(ftrace_graph_caller)
13537+ENDPROC(ftrace_graph_caller)
13538
13539 GLOBAL(return_to_handler)
13540 subq $24, %rsp
13541@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13542 movq 8(%rsp), %rdx
13543 movq (%rsp), %rax
13544 addq $24, %rsp
13545+ pax_force_fptr %rdi
13546 jmp *%rdi
13547 #endif
13548
13549@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13550 ENDPROC(native_usergs_sysret64)
13551 #endif /* CONFIG_PARAVIRT */
13552
13553+ .macro ljmpq sel, off
13554+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13555+ .byte 0x48; ljmp *1234f(%rip)
13556+ .pushsection .rodata
13557+ .align 16
13558+ 1234: .quad \off; .word \sel
13559+ .popsection
13560+#else
13561+ pushq $\sel
13562+ pushq $\off
13563+ lretq
13564+#endif
13565+ .endm
13566+
13567+ .macro pax_enter_kernel
13568+ pax_set_fptr_mask
13569+#ifdef CONFIG_PAX_KERNEXEC
13570+ call pax_enter_kernel
13571+#endif
13572+ .endm
13573+
13574+ .macro pax_exit_kernel
13575+#ifdef CONFIG_PAX_KERNEXEC
13576+ call pax_exit_kernel
13577+#endif
13578+ .endm
13579+
13580+#ifdef CONFIG_PAX_KERNEXEC
13581+ENTRY(pax_enter_kernel)
13582+ pushq %rdi
13583+
13584+#ifdef CONFIG_PARAVIRT
13585+ PV_SAVE_REGS(CLBR_RDI)
13586+#endif
13587+
13588+ GET_CR0_INTO_RDI
13589+ bts $16,%rdi
13590+ jnc 3f
13591+ mov %cs,%edi
13592+ cmp $__KERNEL_CS,%edi
13593+ jnz 2f
13594+1:
13595+
13596+#ifdef CONFIG_PARAVIRT
13597+ PV_RESTORE_REGS(CLBR_RDI)
13598+#endif
13599+
13600+ popq %rdi
13601+ pax_force_retaddr
13602+ retq
13603+
13604+2: ljmpq __KERNEL_CS,1f
13605+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13606+4: SET_RDI_INTO_CR0
13607+ jmp 1b
13608+ENDPROC(pax_enter_kernel)
13609+
13610+ENTRY(pax_exit_kernel)
13611+ pushq %rdi
13612+
13613+#ifdef CONFIG_PARAVIRT
13614+ PV_SAVE_REGS(CLBR_RDI)
13615+#endif
13616+
13617+ mov %cs,%rdi
13618+ cmp $__KERNEXEC_KERNEL_CS,%edi
13619+ jz 2f
13620+1:
13621+
13622+#ifdef CONFIG_PARAVIRT
13623+ PV_RESTORE_REGS(CLBR_RDI);
13624+#endif
13625+
13626+ popq %rdi
13627+ pax_force_retaddr
13628+ retq
13629+
13630+2: GET_CR0_INTO_RDI
13631+ btr $16,%rdi
13632+ ljmpq __KERNEL_CS,3f
13633+3: SET_RDI_INTO_CR0
13634+ jmp 1b
13635+#ifdef CONFIG_PARAVIRT
13636+ PV_RESTORE_REGS(CLBR_RDI);
13637+#endif
13638+
13639+ popq %rdi
13640+ pax_force_retaddr
13641+ retq
13642+ENDPROC(pax_exit_kernel)
13643+#endif
13644+
13645+ .macro pax_enter_kernel_user
13646+ pax_set_fptr_mask
13647+#ifdef CONFIG_PAX_MEMORY_UDEREF
13648+ call pax_enter_kernel_user
13649+#endif
13650+ .endm
13651+
13652+ .macro pax_exit_kernel_user
13653+#ifdef CONFIG_PAX_MEMORY_UDEREF
13654+ call pax_exit_kernel_user
13655+#endif
13656+#ifdef CONFIG_PAX_RANDKSTACK
13657+ pushq %rax
13658+ call pax_randomize_kstack
13659+ popq %rax
13660+#endif
13661+ .endm
13662+
13663+#ifdef CONFIG_PAX_MEMORY_UDEREF
13664+ENTRY(pax_enter_kernel_user)
13665+ pushq %rdi
13666+ pushq %rbx
13667+
13668+#ifdef CONFIG_PARAVIRT
13669+ PV_SAVE_REGS(CLBR_RDI)
13670+#endif
13671+
13672+ GET_CR3_INTO_RDI
13673+ mov %rdi,%rbx
13674+ add $__START_KERNEL_map,%rbx
13675+ sub phys_base(%rip),%rbx
13676+
13677+#ifdef CONFIG_PARAVIRT
13678+ pushq %rdi
13679+ cmpl $0, pv_info+PARAVIRT_enabled
13680+ jz 1f
13681+ i = 0
13682+ .rept USER_PGD_PTRS
13683+ mov i*8(%rbx),%rsi
13684+ mov $0,%sil
13685+ lea i*8(%rbx),%rdi
13686+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13687+ i = i + 1
13688+ .endr
13689+ jmp 2f
13690+1:
13691+#endif
13692+
13693+ i = 0
13694+ .rept USER_PGD_PTRS
13695+ movb $0,i*8(%rbx)
13696+ i = i + 1
13697+ .endr
13698+
13699+#ifdef CONFIG_PARAVIRT
13700+2: popq %rdi
13701+#endif
13702+ SET_RDI_INTO_CR3
13703+
13704+#ifdef CONFIG_PAX_KERNEXEC
13705+ GET_CR0_INTO_RDI
13706+ bts $16,%rdi
13707+ SET_RDI_INTO_CR0
13708+#endif
13709+
13710+#ifdef CONFIG_PARAVIRT
13711+ PV_RESTORE_REGS(CLBR_RDI)
13712+#endif
13713+
13714+ popq %rbx
13715+ popq %rdi
13716+ pax_force_retaddr
13717+ retq
13718+ENDPROC(pax_enter_kernel_user)
13719+
13720+ENTRY(pax_exit_kernel_user)
13721+ push %rdi
13722+
13723+#ifdef CONFIG_PARAVIRT
13724+ pushq %rbx
13725+ PV_SAVE_REGS(CLBR_RDI)
13726+#endif
13727+
13728+#ifdef CONFIG_PAX_KERNEXEC
13729+ GET_CR0_INTO_RDI
13730+ btr $16,%rdi
13731+ SET_RDI_INTO_CR0
13732+#endif
13733+
13734+ GET_CR3_INTO_RDI
13735+ add $__START_KERNEL_map,%rdi
13736+ sub phys_base(%rip),%rdi
13737+
13738+#ifdef CONFIG_PARAVIRT
13739+ cmpl $0, pv_info+PARAVIRT_enabled
13740+ jz 1f
13741+ mov %rdi,%rbx
13742+ i = 0
13743+ .rept USER_PGD_PTRS
13744+ mov i*8(%rbx),%rsi
13745+ mov $0x67,%sil
13746+ lea i*8(%rbx),%rdi
13747+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13748+ i = i + 1
13749+ .endr
13750+ jmp 2f
13751+1:
13752+#endif
13753+
13754+ i = 0
13755+ .rept USER_PGD_PTRS
13756+ movb $0x67,i*8(%rdi)
13757+ i = i + 1
13758+ .endr
13759+
13760+#ifdef CONFIG_PARAVIRT
13761+2: PV_RESTORE_REGS(CLBR_RDI)
13762+ popq %rbx
13763+#endif
13764+
13765+ popq %rdi
13766+ pax_force_retaddr
13767+ retq
13768+ENDPROC(pax_exit_kernel_user)
13769+#endif
13770+
13771+.macro pax_erase_kstack
13772+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13773+ call pax_erase_kstack
13774+#endif
13775+.endm
13776+
13777+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13778+/*
13779+ * r11: thread_info
13780+ * rcx, rdx: can be clobbered
13781+ */
13782+ENTRY(pax_erase_kstack)
13783+ pushq %rdi
13784+ pushq %rax
13785+ pushq %r11
13786+
13787+ GET_THREAD_INFO(%r11)
13788+ mov TI_lowest_stack(%r11), %rdi
13789+ mov $-0xBEEF, %rax
13790+ std
13791+
13792+1: mov %edi, %ecx
13793+ and $THREAD_SIZE_asm - 1, %ecx
13794+ shr $3, %ecx
13795+ repne scasq
13796+ jecxz 2f
13797+
13798+ cmp $2*8, %ecx
13799+ jc 2f
13800+
13801+ mov $2*8, %ecx
13802+ repe scasq
13803+ jecxz 2f
13804+ jne 1b
13805+
13806+2: cld
13807+ mov %esp, %ecx
13808+ sub %edi, %ecx
13809+
13810+ cmp $THREAD_SIZE_asm, %rcx
13811+ jb 3f
13812+ ud2
13813+3:
13814+
13815+ shr $3, %ecx
13816+ rep stosq
13817+
13818+ mov TI_task_thread_sp0(%r11), %rdi
13819+ sub $256, %rdi
13820+ mov %rdi, TI_lowest_stack(%r11)
13821+
13822+ popq %r11
13823+ popq %rax
13824+ popq %rdi
13825+ pax_force_retaddr
13826+ ret
13827+ENDPROC(pax_erase_kstack)
13828+#endif
13829
13830 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13831 #ifdef CONFIG_TRACE_IRQFLAGS
13832@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13833 .endm
13834
13835 .macro UNFAKE_STACK_FRAME
13836- addq $8*6, %rsp
13837- CFI_ADJUST_CFA_OFFSET -(6*8)
13838+ addq $8*6 + ARG_SKIP, %rsp
13839+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13840 .endm
13841
13842 /*
13843@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13844 movq %rsp, %rsi
13845
13846 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13847- testl $3, CS(%rdi)
13848+ testb $3, CS(%rdi)
13849 je 1f
13850 SWAPGS
13851 /*
13852@@ -355,9 +639,10 @@ ENTRY(save_rest)
13853 movq_cfi r15, R15+16
13854 movq %r11, 8(%rsp) /* return address */
13855 FIXUP_TOP_OF_STACK %r11, 16
13856+ pax_force_retaddr
13857 ret
13858 CFI_ENDPROC
13859-END(save_rest)
13860+ENDPROC(save_rest)
13861
13862 /* save complete stack frame */
13863 .pushsection .kprobes.text, "ax"
13864@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13865 js 1f /* negative -> in kernel */
13866 SWAPGS
13867 xorl %ebx,%ebx
13868-1: ret
13869+1: pax_force_retaddr_bts
13870+ ret
13871 CFI_ENDPROC
13872-END(save_paranoid)
13873+ENDPROC(save_paranoid)
13874 .popsection
13875
13876 /*
13877@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13878
13879 RESTORE_REST
13880
13881- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13882+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13883 je int_ret_from_sys_call
13884
13885 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13886@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13887 jmp ret_from_sys_call # go to the SYSRET fastpath
13888
13889 CFI_ENDPROC
13890-END(ret_from_fork)
13891+ENDPROC(ret_from_fork)
13892
13893 /*
13894 * System call entry. Up to 6 arguments in registers are supported.
13895@@ -456,7 +742,7 @@ END(ret_from_fork)
13896 ENTRY(system_call)
13897 CFI_STARTPROC simple
13898 CFI_SIGNAL_FRAME
13899- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13900+ CFI_DEF_CFA rsp,0
13901 CFI_REGISTER rip,rcx
13902 /*CFI_REGISTER rflags,r11*/
13903 SWAPGS_UNSAFE_STACK
13904@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13905
13906 movq %rsp,PER_CPU_VAR(old_rsp)
13907 movq PER_CPU_VAR(kernel_stack),%rsp
13908+ SAVE_ARGS 8*6,0
13909+ pax_enter_kernel_user
13910 /*
13911 * No need to follow this irqs off/on section - it's straight
13912 * and short:
13913 */
13914 ENABLE_INTERRUPTS(CLBR_NONE)
13915- SAVE_ARGS 8,0
13916 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13917 movq %rcx,RIP-ARGOFFSET(%rsp)
13918 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13919@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13920 system_call_fastpath:
13921 cmpq $__NR_syscall_max,%rax
13922 ja badsys
13923- movq %r10,%rcx
13924+ movq R10-ARGOFFSET(%rsp),%rcx
13925 call *sys_call_table(,%rax,8) # XXX: rip relative
13926 movq %rax,RAX-ARGOFFSET(%rsp)
13927 /*
13928@@ -503,6 +790,8 @@ sysret_check:
13929 andl %edi,%edx
13930 jnz sysret_careful
13931 CFI_REMEMBER_STATE
13932+ pax_exit_kernel_user
13933+ pax_erase_kstack
13934 /*
13935 * sysretq will re-enable interrupts:
13936 */
13937@@ -554,14 +843,18 @@ badsys:
13938 * jump back to the normal fast path.
13939 */
13940 auditsys:
13941- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13942+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13943 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13944 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13945 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13946 movq %rax,%rsi /* 2nd arg: syscall number */
13947 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13948 call audit_syscall_entry
13949+
13950+ pax_erase_kstack
13951+
13952 LOAD_ARGS 0 /* reload call-clobbered registers */
13953+ pax_set_fptr_mask
13954 jmp system_call_fastpath
13955
13956 /*
13957@@ -591,16 +884,20 @@ tracesys:
13958 FIXUP_TOP_OF_STACK %rdi
13959 movq %rsp,%rdi
13960 call syscall_trace_enter
13961+
13962+ pax_erase_kstack
13963+
13964 /*
13965 * Reload arg registers from stack in case ptrace changed them.
13966 * We don't reload %rax because syscall_trace_enter() returned
13967 * the value it wants us to use in the table lookup.
13968 */
13969 LOAD_ARGS ARGOFFSET, 1
13970+ pax_set_fptr_mask
13971 RESTORE_REST
13972 cmpq $__NR_syscall_max,%rax
13973 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13974- movq %r10,%rcx /* fixup for C */
13975+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13976 call *sys_call_table(,%rax,8)
13977 movq %rax,RAX-ARGOFFSET(%rsp)
13978 /* Use IRET because user could have changed frame */
13979@@ -612,7 +909,7 @@ tracesys:
13980 GLOBAL(int_ret_from_sys_call)
13981 DISABLE_INTERRUPTS(CLBR_NONE)
13982 TRACE_IRQS_OFF
13983- testl $3,CS-ARGOFFSET(%rsp)
13984+ testb $3,CS-ARGOFFSET(%rsp)
13985 je retint_restore_args
13986 movl $_TIF_ALLWORK_MASK,%edi
13987 /* edi: mask to check */
13988@@ -669,7 +966,7 @@ int_restore_rest:
13989 TRACE_IRQS_OFF
13990 jmp int_with_check
13991 CFI_ENDPROC
13992-END(system_call)
13993+ENDPROC(system_call)
13994
13995 /*
13996 * Certain special system calls that need to save a complete full stack frame.
13997@@ -685,7 +982,7 @@ ENTRY(\label)
13998 call \func
13999 jmp ptregscall_common
14000 CFI_ENDPROC
14001-END(\label)
14002+ENDPROC(\label)
14003 .endm
14004
14005 PTREGSCALL stub_clone, sys_clone, %r8
14006@@ -703,9 +1000,10 @@ ENTRY(ptregscall_common)
14007 movq_cfi_restore R12+8, r12
14008 movq_cfi_restore RBP+8, rbp
14009 movq_cfi_restore RBX+8, rbx
14010+ pax_force_retaddr
14011 ret $REST_SKIP /* pop extended registers */
14012 CFI_ENDPROC
14013-END(ptregscall_common)
14014+ENDPROC(ptregscall_common)
14015
14016 ENTRY(stub_execve)
14017 CFI_STARTPROC
14018@@ -720,7 +1018,7 @@ ENTRY(stub_execve)
14019 RESTORE_REST
14020 jmp int_ret_from_sys_call
14021 CFI_ENDPROC
14022-END(stub_execve)
14023+ENDPROC(stub_execve)
14024
14025 /*
14026 * sigreturn is special because it needs to restore all registers on return.
14027@@ -738,7 +1036,7 @@ ENTRY(stub_rt_sigreturn)
14028 RESTORE_REST
14029 jmp int_ret_from_sys_call
14030 CFI_ENDPROC
14031-END(stub_rt_sigreturn)
14032+ENDPROC(stub_rt_sigreturn)
14033
14034 /*
14035 * Build the entry stubs and pointer table with some assembler magic.
14036@@ -773,7 +1071,7 @@ vector=vector+1
14037 2: jmp common_interrupt
14038 .endr
14039 CFI_ENDPROC
14040-END(irq_entries_start)
14041+ENDPROC(irq_entries_start)
14042
14043 .previous
14044 END(interrupt)
14045@@ -793,6 +1091,16 @@ END(interrupt)
14046 subq $ORIG_RAX-RBP, %rsp
14047 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14048 SAVE_ARGS_IRQ
14049+#ifdef CONFIG_PAX_MEMORY_UDEREF
14050+ testb $3, CS(%rdi)
14051+ jnz 1f
14052+ pax_enter_kernel
14053+ jmp 2f
14054+1: pax_enter_kernel_user
14055+2:
14056+#else
14057+ pax_enter_kernel
14058+#endif
14059 call \func
14060 .endm
14061
14062@@ -824,7 +1132,7 @@ ret_from_intr:
14063
14064 exit_intr:
14065 GET_THREAD_INFO(%rcx)
14066- testl $3,CS-ARGOFFSET(%rsp)
14067+ testb $3,CS-ARGOFFSET(%rsp)
14068 je retint_kernel
14069
14070 /* Interrupt came from user space */
14071@@ -846,12 +1154,16 @@ retint_swapgs: /* return to user-space */
14072 * The iretq could re-enable interrupts:
14073 */
14074 DISABLE_INTERRUPTS(CLBR_ANY)
14075+ pax_exit_kernel_user
14076+ pax_erase_kstack
14077 TRACE_IRQS_IRETQ
14078 SWAPGS
14079 jmp restore_args
14080
14081 retint_restore_args: /* return to kernel space */
14082 DISABLE_INTERRUPTS(CLBR_ANY)
14083+ pax_exit_kernel
14084+ pax_force_retaddr RIP-ARGOFFSET
14085 /*
14086 * The iretq could re-enable interrupts:
14087 */
14088@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14089 #endif
14090
14091 CFI_ENDPROC
14092-END(common_interrupt)
14093+ENDPROC(common_interrupt)
14094 /*
14095 * End of kprobes section
14096 */
14097@@ -956,7 +1268,7 @@ ENTRY(\sym)
14098 interrupt \do_sym
14099 jmp ret_from_intr
14100 CFI_ENDPROC
14101-END(\sym)
14102+ENDPROC(\sym)
14103 .endm
14104
14105 #ifdef CONFIG_SMP
14106@@ -1021,12 +1333,22 @@ ENTRY(\sym)
14107 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14108 call error_entry
14109 DEFAULT_FRAME 0
14110+#ifdef CONFIG_PAX_MEMORY_UDEREF
14111+ testb $3, CS(%rsp)
14112+ jnz 1f
14113+ pax_enter_kernel
14114+ jmp 2f
14115+1: pax_enter_kernel_user
14116+2:
14117+#else
14118+ pax_enter_kernel
14119+#endif
14120 movq %rsp,%rdi /* pt_regs pointer */
14121 xorl %esi,%esi /* no error code */
14122 call \do_sym
14123 jmp error_exit /* %ebx: no swapgs flag */
14124 CFI_ENDPROC
14125-END(\sym)
14126+ENDPROC(\sym)
14127 .endm
14128
14129 .macro paranoidzeroentry sym do_sym
14130@@ -1038,15 +1360,25 @@ ENTRY(\sym)
14131 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14132 call save_paranoid
14133 TRACE_IRQS_OFF
14134+#ifdef CONFIG_PAX_MEMORY_UDEREF
14135+ testb $3, CS(%rsp)
14136+ jnz 1f
14137+ pax_enter_kernel
14138+ jmp 2f
14139+1: pax_enter_kernel_user
14140+2:
14141+#else
14142+ pax_enter_kernel
14143+#endif
14144 movq %rsp,%rdi /* pt_regs pointer */
14145 xorl %esi,%esi /* no error code */
14146 call \do_sym
14147 jmp paranoid_exit /* %ebx: no swapgs flag */
14148 CFI_ENDPROC
14149-END(\sym)
14150+ENDPROC(\sym)
14151 .endm
14152
14153-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14154+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14155 .macro paranoidzeroentry_ist sym do_sym ist
14156 ENTRY(\sym)
14157 INTR_FRAME
14158@@ -1056,14 +1388,30 @@ ENTRY(\sym)
14159 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14160 call save_paranoid
14161 TRACE_IRQS_OFF
14162+#ifdef CONFIG_PAX_MEMORY_UDEREF
14163+ testb $3, CS(%rsp)
14164+ jnz 1f
14165+ pax_enter_kernel
14166+ jmp 2f
14167+1: pax_enter_kernel_user
14168+2:
14169+#else
14170+ pax_enter_kernel
14171+#endif
14172 movq %rsp,%rdi /* pt_regs pointer */
14173 xorl %esi,%esi /* no error code */
14174+#ifdef CONFIG_SMP
14175+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14176+ lea init_tss(%r12), %r12
14177+#else
14178+ lea init_tss(%rip), %r12
14179+#endif
14180 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14181 call \do_sym
14182 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14183 jmp paranoid_exit /* %ebx: no swapgs flag */
14184 CFI_ENDPROC
14185-END(\sym)
14186+ENDPROC(\sym)
14187 .endm
14188
14189 .macro errorentry sym do_sym
14190@@ -1074,13 +1422,23 @@ ENTRY(\sym)
14191 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14192 call error_entry
14193 DEFAULT_FRAME 0
14194+#ifdef CONFIG_PAX_MEMORY_UDEREF
14195+ testb $3, CS(%rsp)
14196+ jnz 1f
14197+ pax_enter_kernel
14198+ jmp 2f
14199+1: pax_enter_kernel_user
14200+2:
14201+#else
14202+ pax_enter_kernel
14203+#endif
14204 movq %rsp,%rdi /* pt_regs pointer */
14205 movq ORIG_RAX(%rsp),%rsi /* get error code */
14206 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14207 call \do_sym
14208 jmp error_exit /* %ebx: no swapgs flag */
14209 CFI_ENDPROC
14210-END(\sym)
14211+ENDPROC(\sym)
14212 .endm
14213
14214 /* error code is on the stack already */
14215@@ -1093,13 +1451,23 @@ ENTRY(\sym)
14216 call save_paranoid
14217 DEFAULT_FRAME 0
14218 TRACE_IRQS_OFF
14219+#ifdef CONFIG_PAX_MEMORY_UDEREF
14220+ testb $3, CS(%rsp)
14221+ jnz 1f
14222+ pax_enter_kernel
14223+ jmp 2f
14224+1: pax_enter_kernel_user
14225+2:
14226+#else
14227+ pax_enter_kernel
14228+#endif
14229 movq %rsp,%rdi /* pt_regs pointer */
14230 movq ORIG_RAX(%rsp),%rsi /* get error code */
14231 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14232 call \do_sym
14233 jmp paranoid_exit /* %ebx: no swapgs flag */
14234 CFI_ENDPROC
14235-END(\sym)
14236+ENDPROC(\sym)
14237 .endm
14238
14239 zeroentry divide_error do_divide_error
14240@@ -1129,9 +1497,10 @@ gs_change:
14241 2: mfence /* workaround */
14242 SWAPGS
14243 popfq_cfi
14244+ pax_force_retaddr
14245 ret
14246 CFI_ENDPROC
14247-END(native_load_gs_index)
14248+ENDPROC(native_load_gs_index)
14249
14250 .section __ex_table,"a"
14251 .align 8
14252@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14253 * Here we are in the child and the registers are set as they were
14254 * at kernel_thread() invocation in the parent.
14255 */
14256+ pax_force_fptr %rsi
14257 call *%rsi
14258 # exit
14259 mov %eax, %edi
14260 call do_exit
14261 ud2 # padding for call trace
14262 CFI_ENDPROC
14263-END(kernel_thread_helper)
14264+ENDPROC(kernel_thread_helper)
14265
14266 /*
14267 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14268@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14269 RESTORE_REST
14270 testq %rax,%rax
14271 je int_ret_from_sys_call
14272- RESTORE_ARGS
14273 UNFAKE_STACK_FRAME
14274+ pax_force_retaddr
14275 ret
14276 CFI_ENDPROC
14277-END(kernel_execve)
14278+ENDPROC(kernel_execve)
14279
14280 /* Call softirq on interrupt stack. Interrupts are off. */
14281 ENTRY(call_softirq)
14282@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14283 CFI_DEF_CFA_REGISTER rsp
14284 CFI_ADJUST_CFA_OFFSET -8
14285 decl PER_CPU_VAR(irq_count)
14286+ pax_force_retaddr
14287 ret
14288 CFI_ENDPROC
14289-END(call_softirq)
14290+ENDPROC(call_softirq)
14291
14292 #ifdef CONFIG_XEN
14293 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14294@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14295 decl PER_CPU_VAR(irq_count)
14296 jmp error_exit
14297 CFI_ENDPROC
14298-END(xen_do_hypervisor_callback)
14299+ENDPROC(xen_do_hypervisor_callback)
14300
14301 /*
14302 * Hypervisor uses this for application faults while it executes.
14303@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14304 SAVE_ALL
14305 jmp error_exit
14306 CFI_ENDPROC
14307-END(xen_failsafe_callback)
14308+ENDPROC(xen_failsafe_callback)
14309
14310 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14311 xen_hvm_callback_vector xen_evtchn_do_upcall
14312@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14313 TRACE_IRQS_OFF
14314 testl %ebx,%ebx /* swapgs needed? */
14315 jnz paranoid_restore
14316- testl $3,CS(%rsp)
14317+ testb $3,CS(%rsp)
14318 jnz paranoid_userspace
14319+#ifdef CONFIG_PAX_MEMORY_UDEREF
14320+ pax_exit_kernel
14321+ TRACE_IRQS_IRETQ 0
14322+ SWAPGS_UNSAFE_STACK
14323+ RESTORE_ALL 8
14324+ pax_force_retaddr_bts
14325+ jmp irq_return
14326+#endif
14327 paranoid_swapgs:
14328+#ifdef CONFIG_PAX_MEMORY_UDEREF
14329+ pax_exit_kernel_user
14330+#else
14331+ pax_exit_kernel
14332+#endif
14333 TRACE_IRQS_IRETQ 0
14334 SWAPGS_UNSAFE_STACK
14335 RESTORE_ALL 8
14336 jmp irq_return
14337 paranoid_restore:
14338+ pax_exit_kernel
14339 TRACE_IRQS_IRETQ 0
14340 RESTORE_ALL 8
14341+ pax_force_retaddr_bts
14342 jmp irq_return
14343 paranoid_userspace:
14344 GET_THREAD_INFO(%rcx)
14345@@ -1394,7 +1780,7 @@ paranoid_schedule:
14346 TRACE_IRQS_OFF
14347 jmp paranoid_userspace
14348 CFI_ENDPROC
14349-END(paranoid_exit)
14350+ENDPROC(paranoid_exit)
14351
14352 /*
14353 * Exception entry point. This expects an error code/orig_rax on the stack.
14354@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14355 movq_cfi r14, R14+8
14356 movq_cfi r15, R15+8
14357 xorl %ebx,%ebx
14358- testl $3,CS+8(%rsp)
14359+ testb $3,CS+8(%rsp)
14360 je error_kernelspace
14361 error_swapgs:
14362 SWAPGS
14363 error_sti:
14364 TRACE_IRQS_OFF
14365+ pax_force_retaddr_bts
14366 ret
14367
14368 /*
14369@@ -1453,7 +1840,7 @@ bstep_iret:
14370 movq %rcx,RIP+8(%rsp)
14371 jmp error_swapgs
14372 CFI_ENDPROC
14373-END(error_entry)
14374+ENDPROC(error_entry)
14375
14376
14377 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14378@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14379 jnz retint_careful
14380 jmp retint_swapgs
14381 CFI_ENDPROC
14382-END(error_exit)
14383+ENDPROC(error_exit)
14384
14385
14386 /* runs on exception stack */
14387@@ -1485,6 +1872,16 @@ ENTRY(nmi)
14388 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14389 call save_paranoid
14390 DEFAULT_FRAME 0
14391+#ifdef CONFIG_PAX_MEMORY_UDEREF
14392+ testb $3, CS(%rsp)
14393+ jnz 1f
14394+ pax_enter_kernel
14395+ jmp 2f
14396+1: pax_enter_kernel_user
14397+2:
14398+#else
14399+ pax_enter_kernel
14400+#endif
14401 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14402 movq %rsp,%rdi
14403 movq $-1,%rsi
14404@@ -1495,12 +1892,28 @@ ENTRY(nmi)
14405 DISABLE_INTERRUPTS(CLBR_NONE)
14406 testl %ebx,%ebx /* swapgs needed? */
14407 jnz nmi_restore
14408- testl $3,CS(%rsp)
14409+ testb $3,CS(%rsp)
14410 jnz nmi_userspace
14411+#ifdef CONFIG_PAX_MEMORY_UDEREF
14412+ pax_exit_kernel
14413+ SWAPGS_UNSAFE_STACK
14414+ RESTORE_ALL 8
14415+ pax_force_retaddr_bts
14416+ jmp irq_return
14417+#endif
14418 nmi_swapgs:
14419+#ifdef CONFIG_PAX_MEMORY_UDEREF
14420+ pax_exit_kernel_user
14421+#else
14422+ pax_exit_kernel
14423+#endif
14424 SWAPGS_UNSAFE_STACK
14425+ RESTORE_ALL 8
14426+ jmp irq_return
14427 nmi_restore:
14428+ pax_exit_kernel
14429 RESTORE_ALL 8
14430+ pax_force_retaddr_bts
14431 jmp irq_return
14432 nmi_userspace:
14433 GET_THREAD_INFO(%rcx)
14434@@ -1529,14 +1942,14 @@ nmi_schedule:
14435 jmp paranoid_exit
14436 CFI_ENDPROC
14437 #endif
14438-END(nmi)
14439+ENDPROC(nmi)
14440
14441 ENTRY(ignore_sysret)
14442 CFI_STARTPROC
14443 mov $-ENOSYS,%eax
14444 sysret
14445 CFI_ENDPROC
14446-END(ignore_sysret)
14447+ENDPROC(ignore_sysret)
14448
14449 /*
14450 * End of kprobes section
14451diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14452index c9a281f..ce2f317 100644
14453--- a/arch/x86/kernel/ftrace.c
14454+++ b/arch/x86/kernel/ftrace.c
14455@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14456 static const void *mod_code_newcode; /* holds the text to write to the IP */
14457
14458 static unsigned nmi_wait_count;
14459-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14460+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14461
14462 int ftrace_arch_read_dyn_info(char *buf, int size)
14463 {
14464@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14465
14466 r = snprintf(buf, size, "%u %u",
14467 nmi_wait_count,
14468- atomic_read(&nmi_update_count));
14469+ atomic_read_unchecked(&nmi_update_count));
14470 return r;
14471 }
14472
14473@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14474
14475 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14476 smp_rmb();
14477+ pax_open_kernel();
14478 ftrace_mod_code();
14479- atomic_inc(&nmi_update_count);
14480+ pax_close_kernel();
14481+ atomic_inc_unchecked(&nmi_update_count);
14482 }
14483 /* Must have previous changes seen before executions */
14484 smp_mb();
14485@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14486 {
14487 unsigned char replaced[MCOUNT_INSN_SIZE];
14488
14489+ ip = ktla_ktva(ip);
14490+
14491 /*
14492 * Note: Due to modules and __init, code can
14493 * disappear and change, we need to protect against faulting
14494@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14495 unsigned char old[MCOUNT_INSN_SIZE], *new;
14496 int ret;
14497
14498- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14499+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14500 new = ftrace_call_replace(ip, (unsigned long)func);
14501 ret = ftrace_modify_code(ip, old, new);
14502
14503@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14504 {
14505 unsigned char code[MCOUNT_INSN_SIZE];
14506
14507+ ip = ktla_ktva(ip);
14508+
14509 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14510 return -EFAULT;
14511
14512diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14513index 3bb0850..55a56f4 100644
14514--- a/arch/x86/kernel/head32.c
14515+++ b/arch/x86/kernel/head32.c
14516@@ -19,6 +19,7 @@
14517 #include <asm/io_apic.h>
14518 #include <asm/bios_ebda.h>
14519 #include <asm/tlbflush.h>
14520+#include <asm/boot.h>
14521
14522 static void __init i386_default_early_setup(void)
14523 {
14524@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14525 {
14526 memblock_init();
14527
14528- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14529+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14530
14531 #ifdef CONFIG_BLK_DEV_INITRD
14532 /* Reserve INITRD */
14533diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14534index ce0be7c..c41476e 100644
14535--- a/arch/x86/kernel/head_32.S
14536+++ b/arch/x86/kernel/head_32.S
14537@@ -25,6 +25,12 @@
14538 /* Physical address */
14539 #define pa(X) ((X) - __PAGE_OFFSET)
14540
14541+#ifdef CONFIG_PAX_KERNEXEC
14542+#define ta(X) (X)
14543+#else
14544+#define ta(X) ((X) - __PAGE_OFFSET)
14545+#endif
14546+
14547 /*
14548 * References to members of the new_cpu_data structure.
14549 */
14550@@ -54,11 +60,7 @@
14551 * and small than max_low_pfn, otherwise will waste some page table entries
14552 */
14553
14554-#if PTRS_PER_PMD > 1
14555-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14556-#else
14557-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14558-#endif
14559+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14560
14561 /* Number of possible pages in the lowmem region */
14562 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14563@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14564 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14565
14566 /*
14567+ * Real beginning of normal "text" segment
14568+ */
14569+ENTRY(stext)
14570+ENTRY(_stext)
14571+
14572+/*
14573 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14574 * %esi points to the real-mode code as a 32-bit pointer.
14575 * CS and DS must be 4 GB flat segments, but we don't depend on
14576@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14577 * can.
14578 */
14579 __HEAD
14580+
14581+#ifdef CONFIG_PAX_KERNEXEC
14582+ jmp startup_32
14583+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14584+.fill PAGE_SIZE-5,1,0xcc
14585+#endif
14586+
14587 ENTRY(startup_32)
14588 movl pa(stack_start),%ecx
14589
14590@@ -105,6 +120,57 @@ ENTRY(startup_32)
14591 2:
14592 leal -__PAGE_OFFSET(%ecx),%esp
14593
14594+#ifdef CONFIG_SMP
14595+ movl $pa(cpu_gdt_table),%edi
14596+ movl $__per_cpu_load,%eax
14597+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14598+ rorl $16,%eax
14599+ movb %al,__KERNEL_PERCPU + 4(%edi)
14600+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14601+ movl $__per_cpu_end - 1,%eax
14602+ subl $__per_cpu_start,%eax
14603+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14604+#endif
14605+
14606+#ifdef CONFIG_PAX_MEMORY_UDEREF
14607+ movl $NR_CPUS,%ecx
14608+ movl $pa(cpu_gdt_table),%edi
14609+1:
14610+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14611+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14612+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14613+ addl $PAGE_SIZE_asm,%edi
14614+ loop 1b
14615+#endif
14616+
14617+#ifdef CONFIG_PAX_KERNEXEC
14618+ movl $pa(boot_gdt),%edi
14619+ movl $__LOAD_PHYSICAL_ADDR,%eax
14620+ movw %ax,__BOOT_CS + 2(%edi)
14621+ rorl $16,%eax
14622+ movb %al,__BOOT_CS + 4(%edi)
14623+ movb %ah,__BOOT_CS + 7(%edi)
14624+ rorl $16,%eax
14625+
14626+ ljmp $(__BOOT_CS),$1f
14627+1:
14628+
14629+ movl $NR_CPUS,%ecx
14630+ movl $pa(cpu_gdt_table),%edi
14631+ addl $__PAGE_OFFSET,%eax
14632+1:
14633+ movw %ax,__KERNEL_CS + 2(%edi)
14634+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14635+ rorl $16,%eax
14636+ movb %al,__KERNEL_CS + 4(%edi)
14637+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14638+ movb %ah,__KERNEL_CS + 7(%edi)
14639+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14640+ rorl $16,%eax
14641+ addl $PAGE_SIZE_asm,%edi
14642+ loop 1b
14643+#endif
14644+
14645 /*
14646 * Clear BSS first so that there are no surprises...
14647 */
14648@@ -195,8 +261,11 @@ ENTRY(startup_32)
14649 movl %eax, pa(max_pfn_mapped)
14650
14651 /* Do early initialization of the fixmap area */
14652- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14653- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14654+#ifdef CONFIG_COMPAT_VDSO
14655+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14656+#else
14657+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14658+#endif
14659 #else /* Not PAE */
14660
14661 page_pde_offset = (__PAGE_OFFSET >> 20);
14662@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14663 movl %eax, pa(max_pfn_mapped)
14664
14665 /* Do early initialization of the fixmap area */
14666- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14667- movl %eax,pa(initial_page_table+0xffc)
14668+#ifdef CONFIG_COMPAT_VDSO
14669+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14670+#else
14671+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14672+#endif
14673 #endif
14674
14675 #ifdef CONFIG_PARAVIRT
14676@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14677 cmpl $num_subarch_entries, %eax
14678 jae bad_subarch
14679
14680- movl pa(subarch_entries)(,%eax,4), %eax
14681- subl $__PAGE_OFFSET, %eax
14682- jmp *%eax
14683+ jmp *pa(subarch_entries)(,%eax,4)
14684
14685 bad_subarch:
14686 WEAK(lguest_entry)
14687@@ -255,10 +325,10 @@ WEAK(xen_entry)
14688 __INITDATA
14689
14690 subarch_entries:
14691- .long default_entry /* normal x86/PC */
14692- .long lguest_entry /* lguest hypervisor */
14693- .long xen_entry /* Xen hypervisor */
14694- .long default_entry /* Moorestown MID */
14695+ .long ta(default_entry) /* normal x86/PC */
14696+ .long ta(lguest_entry) /* lguest hypervisor */
14697+ .long ta(xen_entry) /* Xen hypervisor */
14698+ .long ta(default_entry) /* Moorestown MID */
14699 num_subarch_entries = (. - subarch_entries) / 4
14700 .previous
14701 #else
14702@@ -312,6 +382,7 @@ default_entry:
14703 orl %edx,%eax
14704 movl %eax,%cr4
14705
14706+#ifdef CONFIG_X86_PAE
14707 testb $X86_CR4_PAE, %al # check if PAE is enabled
14708 jz 6f
14709
14710@@ -340,6 +411,9 @@ default_entry:
14711 /* Make changes effective */
14712 wrmsr
14713
14714+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14715+#endif
14716+
14717 6:
14718
14719 /*
14720@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14721 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14722 movl %eax,%ss # after changing gdt.
14723
14724- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14725+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14726 movl %eax,%ds
14727 movl %eax,%es
14728
14729@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14730 */
14731 cmpb $0,ready
14732 jne 1f
14733- movl $gdt_page,%eax
14734+ movl $cpu_gdt_table,%eax
14735 movl $stack_canary,%ecx
14736+#ifdef CONFIG_SMP
14737+ addl $__per_cpu_load,%ecx
14738+#endif
14739 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14740 shrl $16, %ecx
14741 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14742 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14743 1:
14744-#endif
14745 movl $(__KERNEL_STACK_CANARY),%eax
14746+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14747+ movl $(__USER_DS),%eax
14748+#else
14749+ xorl %eax,%eax
14750+#endif
14751 movl %eax,%gs
14752
14753 xorl %eax,%eax # Clear LDT
14754@@ -558,22 +639,22 @@ early_page_fault:
14755 jmp early_fault
14756
14757 early_fault:
14758- cld
14759 #ifdef CONFIG_PRINTK
14760+ cmpl $1,%ss:early_recursion_flag
14761+ je hlt_loop
14762+ incl %ss:early_recursion_flag
14763+ cld
14764 pusha
14765 movl $(__KERNEL_DS),%eax
14766 movl %eax,%ds
14767 movl %eax,%es
14768- cmpl $2,early_recursion_flag
14769- je hlt_loop
14770- incl early_recursion_flag
14771 movl %cr2,%eax
14772 pushl %eax
14773 pushl %edx /* trapno */
14774 pushl $fault_msg
14775 call printk
14776+; call dump_stack
14777 #endif
14778- call dump_stack
14779 hlt_loop:
14780 hlt
14781 jmp hlt_loop
14782@@ -581,8 +662,11 @@ hlt_loop:
14783 /* This is the default interrupt "handler" :-) */
14784 ALIGN
14785 ignore_int:
14786- cld
14787 #ifdef CONFIG_PRINTK
14788+ cmpl $2,%ss:early_recursion_flag
14789+ je hlt_loop
14790+ incl %ss:early_recursion_flag
14791+ cld
14792 pushl %eax
14793 pushl %ecx
14794 pushl %edx
14795@@ -591,9 +675,6 @@ ignore_int:
14796 movl $(__KERNEL_DS),%eax
14797 movl %eax,%ds
14798 movl %eax,%es
14799- cmpl $2,early_recursion_flag
14800- je hlt_loop
14801- incl early_recursion_flag
14802 pushl 16(%esp)
14803 pushl 24(%esp)
14804 pushl 32(%esp)
14805@@ -622,29 +703,43 @@ ENTRY(initial_code)
14806 /*
14807 * BSS section
14808 */
14809-__PAGE_ALIGNED_BSS
14810- .align PAGE_SIZE
14811 #ifdef CONFIG_X86_PAE
14812+.section .initial_pg_pmd,"a",@progbits
14813 initial_pg_pmd:
14814 .fill 1024*KPMDS,4,0
14815 #else
14816+.section .initial_page_table,"a",@progbits
14817 ENTRY(initial_page_table)
14818 .fill 1024,4,0
14819 #endif
14820+.section .initial_pg_fixmap,"a",@progbits
14821 initial_pg_fixmap:
14822 .fill 1024,4,0
14823+.section .empty_zero_page,"a",@progbits
14824 ENTRY(empty_zero_page)
14825 .fill 4096,1,0
14826+.section .swapper_pg_dir,"a",@progbits
14827 ENTRY(swapper_pg_dir)
14828+#ifdef CONFIG_X86_PAE
14829+ .fill 4,8,0
14830+#else
14831 .fill 1024,4,0
14832+#endif
14833+
14834+/*
14835+ * The IDT has to be page-aligned to simplify the Pentium
14836+ * F0 0F bug workaround.. We have a special link segment
14837+ * for this.
14838+ */
14839+.section .idt,"a",@progbits
14840+ENTRY(idt_table)
14841+ .fill 256,8,0
14842
14843 /*
14844 * This starts the data section.
14845 */
14846 #ifdef CONFIG_X86_PAE
14847-__PAGE_ALIGNED_DATA
14848- /* Page-aligned for the benefit of paravirt? */
14849- .align PAGE_SIZE
14850+.section .initial_page_table,"a",@progbits
14851 ENTRY(initial_page_table)
14852 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14853 # if KPMDS == 3
14854@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14855 # error "Kernel PMDs should be 1, 2 or 3"
14856 # endif
14857 .align PAGE_SIZE /* needs to be page-sized too */
14858+
14859+#ifdef CONFIG_PAX_PER_CPU_PGD
14860+ENTRY(cpu_pgd)
14861+ .rept NR_CPUS
14862+ .fill 4,8,0
14863+ .endr
14864+#endif
14865+
14866 #endif
14867
14868 .data
14869 .balign 4
14870 ENTRY(stack_start)
14871- .long init_thread_union+THREAD_SIZE
14872+ .long init_thread_union+THREAD_SIZE-8
14873
14874+ready: .byte 0
14875+
14876+.section .rodata,"a",@progbits
14877 early_recursion_flag:
14878 .long 0
14879
14880-ready: .byte 0
14881-
14882 int_msg:
14883 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14884
14885@@ -707,7 +811,7 @@ fault_msg:
14886 .word 0 # 32 bit align gdt_desc.address
14887 boot_gdt_descr:
14888 .word __BOOT_DS+7
14889- .long boot_gdt - __PAGE_OFFSET
14890+ .long pa(boot_gdt)
14891
14892 .word 0 # 32-bit align idt_desc.address
14893 idt_descr:
14894@@ -718,7 +822,7 @@ idt_descr:
14895 .word 0 # 32 bit align gdt_desc.address
14896 ENTRY(early_gdt_descr)
14897 .word GDT_ENTRIES*8-1
14898- .long gdt_page /* Overwritten for secondary CPUs */
14899+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14900
14901 /*
14902 * The boot_gdt must mirror the equivalent in setup.S and is
14903@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14904 .align L1_CACHE_BYTES
14905 ENTRY(boot_gdt)
14906 .fill GDT_ENTRY_BOOT_CS,8,0
14907- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14908- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14909+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14910+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14911+
14912+ .align PAGE_SIZE_asm
14913+ENTRY(cpu_gdt_table)
14914+ .rept NR_CPUS
14915+ .quad 0x0000000000000000 /* NULL descriptor */
14916+ .quad 0x0000000000000000 /* 0x0b reserved */
14917+ .quad 0x0000000000000000 /* 0x13 reserved */
14918+ .quad 0x0000000000000000 /* 0x1b reserved */
14919+
14920+#ifdef CONFIG_PAX_KERNEXEC
14921+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14922+#else
14923+ .quad 0x0000000000000000 /* 0x20 unused */
14924+#endif
14925+
14926+ .quad 0x0000000000000000 /* 0x28 unused */
14927+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14928+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14929+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14930+ .quad 0x0000000000000000 /* 0x4b reserved */
14931+ .quad 0x0000000000000000 /* 0x53 reserved */
14932+ .quad 0x0000000000000000 /* 0x5b reserved */
14933+
14934+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14935+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14936+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14937+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14938+
14939+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14940+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14941+
14942+ /*
14943+ * Segments used for calling PnP BIOS have byte granularity.
14944+ * The code segments and data segments have fixed 64k limits,
14945+ * the transfer segment sizes are set at run time.
14946+ */
14947+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14948+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14949+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14950+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14951+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14952+
14953+ /*
14954+ * The APM segments have byte granularity and their bases
14955+ * are set at run time. All have 64k limits.
14956+ */
14957+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14958+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14959+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14960+
14961+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14962+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14963+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14964+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14965+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14966+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14967+
14968+ /* Be sure this is zeroed to avoid false validations in Xen */
14969+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14970+ .endr
14971diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14972index e11e394..9aebc5d 100644
14973--- a/arch/x86/kernel/head_64.S
14974+++ b/arch/x86/kernel/head_64.S
14975@@ -19,6 +19,8 @@
14976 #include <asm/cache.h>
14977 #include <asm/processor-flags.h>
14978 #include <asm/percpu.h>
14979+#include <asm/cpufeature.h>
14980+#include <asm/alternative-asm.h>
14981
14982 #ifdef CONFIG_PARAVIRT
14983 #include <asm/asm-offsets.h>
14984@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
14985 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14986 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14987 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14988+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14989+L3_VMALLOC_START = pud_index(VMALLOC_START)
14990+L4_VMALLOC_END = pgd_index(VMALLOC_END)
14991+L3_VMALLOC_END = pud_index(VMALLOC_END)
14992+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14993+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14994
14995 .text
14996 __HEAD
14997@@ -85,35 +93,23 @@ startup_64:
14998 */
14999 addq %rbp, init_level4_pgt + 0(%rip)
15000 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15001+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15002+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15003+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15004 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15005
15006 addq %rbp, level3_ident_pgt + 0(%rip)
15007+#ifndef CONFIG_XEN
15008+ addq %rbp, level3_ident_pgt + 8(%rip)
15009+#endif
15010
15011- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15012- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15013+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15014+
15015+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15016+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15017
15018 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15019-
15020- /* Add an Identity mapping if I am above 1G */
15021- leaq _text(%rip), %rdi
15022- andq $PMD_PAGE_MASK, %rdi
15023-
15024- movq %rdi, %rax
15025- shrq $PUD_SHIFT, %rax
15026- andq $(PTRS_PER_PUD - 1), %rax
15027- jz ident_complete
15028-
15029- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15030- leaq level3_ident_pgt(%rip), %rbx
15031- movq %rdx, 0(%rbx, %rax, 8)
15032-
15033- movq %rdi, %rax
15034- shrq $PMD_SHIFT, %rax
15035- andq $(PTRS_PER_PMD - 1), %rax
15036- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15037- leaq level2_spare_pgt(%rip), %rbx
15038- movq %rdx, 0(%rbx, %rax, 8)
15039-ident_complete:
15040+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15041
15042 /*
15043 * Fixup the kernel text+data virtual addresses. Note that
15044@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15045 * after the boot processor executes this code.
15046 */
15047
15048- /* Enable PAE mode and PGE */
15049- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15050+ /* Enable PAE mode and PSE/PGE */
15051+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15052 movq %rax, %cr4
15053
15054 /* Setup early boot stage 4 level pagetables. */
15055@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15056 movl $MSR_EFER, %ecx
15057 rdmsr
15058 btsl $_EFER_SCE, %eax /* Enable System Call */
15059- btl $20,%edi /* No Execute supported? */
15060+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15061 jnc 1f
15062 btsl $_EFER_NX, %eax
15063+ leaq init_level4_pgt(%rip), %rdi
15064+#ifndef CONFIG_EFI
15065+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15066+#endif
15067+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15068+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15069+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15070+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15071 1: wrmsr /* Make changes effective */
15072
15073 /* Setup cr0 */
15074@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15075 * jump. In addition we need to ensure %cs is set so we make this
15076 * a far return.
15077 */
15078+ pax_set_fptr_mask
15079 movq initial_code(%rip),%rax
15080 pushq $0 # fake return address to stop unwinder
15081 pushq $__KERNEL_CS # set correct cs
15082@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15083 bad_address:
15084 jmp bad_address
15085
15086- .section ".init.text","ax"
15087+ __INIT
15088 #ifdef CONFIG_EARLY_PRINTK
15089 .globl early_idt_handlers
15090 early_idt_handlers:
15091@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15092 #endif /* EARLY_PRINTK */
15093 1: hlt
15094 jmp 1b
15095+ .previous
15096
15097 #ifdef CONFIG_EARLY_PRINTK
15098+ __INITDATA
15099 early_recursion_flag:
15100 .long 0
15101+ .previous
15102
15103+ .section .rodata,"a",@progbits
15104 early_idt_msg:
15105 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15106 early_idt_ripmsg:
15107 .asciz "RIP %s\n"
15108+ .previous
15109 #endif /* CONFIG_EARLY_PRINTK */
15110- .previous
15111
15112+ .section .rodata,"a",@progbits
15113 #define NEXT_PAGE(name) \
15114 .balign PAGE_SIZE; \
15115 ENTRY(name)
15116@@ -338,7 +348,6 @@ ENTRY(name)
15117 i = i + 1 ; \
15118 .endr
15119
15120- .data
15121 /*
15122 * This default setting generates an ident mapping at address 0x100000
15123 * and a mapping for the kernel that precisely maps virtual address
15124@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15125 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15126 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15127 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15128+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15129+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15130+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15131+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15132+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15133+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15134 .org init_level4_pgt + L4_START_KERNEL*8, 0
15135 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15136 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15137
15138+#ifdef CONFIG_PAX_PER_CPU_PGD
15139+NEXT_PAGE(cpu_pgd)
15140+ .rept NR_CPUS
15141+ .fill 512,8,0
15142+ .endr
15143+#endif
15144+
15145 NEXT_PAGE(level3_ident_pgt)
15146 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15147+#ifdef CONFIG_XEN
15148 .fill 511,8,0
15149+#else
15150+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15151+ .fill 510,8,0
15152+#endif
15153+
15154+NEXT_PAGE(level3_vmalloc_start_pgt)
15155+ .fill 512,8,0
15156+
15157+NEXT_PAGE(level3_vmalloc_end_pgt)
15158+ .fill 512,8,0
15159+
15160+NEXT_PAGE(level3_vmemmap_pgt)
15161+ .fill L3_VMEMMAP_START,8,0
15162+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15163
15164 NEXT_PAGE(level3_kernel_pgt)
15165 .fill L3_START_KERNEL,8,0
15166@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15167 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15168 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15169
15170+NEXT_PAGE(level2_vmemmap_pgt)
15171+ .fill 512,8,0
15172+
15173 NEXT_PAGE(level2_fixmap_pgt)
15174- .fill 506,8,0
15175- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15176- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15177- .fill 5,8,0
15178+ .fill 507,8,0
15179+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15180+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15181+ .fill 4,8,0
15182
15183-NEXT_PAGE(level1_fixmap_pgt)
15184+NEXT_PAGE(level1_vsyscall_pgt)
15185 .fill 512,8,0
15186
15187-NEXT_PAGE(level2_ident_pgt)
15188- /* Since I easily can, map the first 1G.
15189+ /* Since I easily can, map the first 2G.
15190 * Don't set NX because code runs from these pages.
15191 */
15192- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15193+NEXT_PAGE(level2_ident_pgt)
15194+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15195
15196 NEXT_PAGE(level2_kernel_pgt)
15197 /*
15198@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15199 * If you want to increase this then increase MODULES_VADDR
15200 * too.)
15201 */
15202- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15203- KERNEL_IMAGE_SIZE/PMD_SIZE)
15204-
15205-NEXT_PAGE(level2_spare_pgt)
15206- .fill 512, 8, 0
15207+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15208
15209 #undef PMDS
15210 #undef NEXT_PAGE
15211
15212- .data
15213+ .align PAGE_SIZE
15214+ENTRY(cpu_gdt_table)
15215+ .rept NR_CPUS
15216+ .quad 0x0000000000000000 /* NULL descriptor */
15217+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15218+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15219+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15220+ .quad 0x00cffb000000ffff /* __USER32_CS */
15221+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15222+ .quad 0x00affb000000ffff /* __USER_CS */
15223+
15224+#ifdef CONFIG_PAX_KERNEXEC
15225+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15226+#else
15227+ .quad 0x0 /* unused */
15228+#endif
15229+
15230+ .quad 0,0 /* TSS */
15231+ .quad 0,0 /* LDT */
15232+ .quad 0,0,0 /* three TLS descriptors */
15233+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15234+ /* asm/segment.h:GDT_ENTRIES must match this */
15235+
15236+ /* zero the remaining page */
15237+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15238+ .endr
15239+
15240 .align 16
15241 .globl early_gdt_descr
15242 early_gdt_descr:
15243 .word GDT_ENTRIES*8-1
15244 early_gdt_descr_base:
15245- .quad INIT_PER_CPU_VAR(gdt_page)
15246+ .quad cpu_gdt_table
15247
15248 ENTRY(phys_base)
15249 /* This must match the first entry in level2_kernel_pgt */
15250 .quad 0x0000000000000000
15251
15252 #include "../../x86/xen/xen-head.S"
15253-
15254- .section .bss, "aw", @nobits
15255+
15256+ .section .rodata,"a",@progbits
15257 .align L1_CACHE_BYTES
15258 ENTRY(idt_table)
15259- .skip IDT_ENTRIES * 16
15260+ .fill 512,8,0
15261
15262 __PAGE_ALIGNED_BSS
15263 .align PAGE_SIZE
15264diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15265index 9c3bd4a..e1d9b35 100644
15266--- a/arch/x86/kernel/i386_ksyms_32.c
15267+++ b/arch/x86/kernel/i386_ksyms_32.c
15268@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15269 EXPORT_SYMBOL(cmpxchg8b_emu);
15270 #endif
15271
15272+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15273+
15274 /* Networking helper routines. */
15275 EXPORT_SYMBOL(csum_partial_copy_generic);
15276+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15277+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15278
15279 EXPORT_SYMBOL(__get_user_1);
15280 EXPORT_SYMBOL(__get_user_2);
15281@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15282
15283 EXPORT_SYMBOL(csum_partial);
15284 EXPORT_SYMBOL(empty_zero_page);
15285+
15286+#ifdef CONFIG_PAX_KERNEXEC
15287+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15288+#endif
15289diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15290index 6104852..6114160 100644
15291--- a/arch/x86/kernel/i8259.c
15292+++ b/arch/x86/kernel/i8259.c
15293@@ -210,7 +210,7 @@ spurious_8259A_irq:
15294 "spurious 8259A interrupt: IRQ%d.\n", irq);
15295 spurious_irq_mask |= irqmask;
15296 }
15297- atomic_inc(&irq_err_count);
15298+ atomic_inc_unchecked(&irq_err_count);
15299 /*
15300 * Theoretically we do not have to handle this IRQ,
15301 * but in Linux this does not cause problems and is
15302diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15303index 43e9ccf..44ccf6f 100644
15304--- a/arch/x86/kernel/init_task.c
15305+++ b/arch/x86/kernel/init_task.c
15306@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15307 * way process stacks are handled. This is done by having a special
15308 * "init_task" linker map entry..
15309 */
15310-union thread_union init_thread_union __init_task_data =
15311- { INIT_THREAD_INFO(init_task) };
15312+union thread_union init_thread_union __init_task_data;
15313
15314 /*
15315 * Initial task structure.
15316@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15317 * section. Since TSS's are completely CPU-local, we want them
15318 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15319 */
15320-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15321-
15322+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15323+EXPORT_SYMBOL(init_tss);
15324diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15325index 8c96897..be66bfa 100644
15326--- a/arch/x86/kernel/ioport.c
15327+++ b/arch/x86/kernel/ioport.c
15328@@ -6,6 +6,7 @@
15329 #include <linux/sched.h>
15330 #include <linux/kernel.h>
15331 #include <linux/capability.h>
15332+#include <linux/security.h>
15333 #include <linux/errno.h>
15334 #include <linux/types.h>
15335 #include <linux/ioport.h>
15336@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15337
15338 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15339 return -EINVAL;
15340+#ifdef CONFIG_GRKERNSEC_IO
15341+ if (turn_on && grsec_disable_privio) {
15342+ gr_handle_ioperm();
15343+ return -EPERM;
15344+ }
15345+#endif
15346 if (turn_on && !capable(CAP_SYS_RAWIO))
15347 return -EPERM;
15348
15349@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15350 * because the ->io_bitmap_max value must match the bitmap
15351 * contents:
15352 */
15353- tss = &per_cpu(init_tss, get_cpu());
15354+ tss = init_tss + get_cpu();
15355
15356 if (turn_on)
15357 bitmap_clear(t->io_bitmap_ptr, from, num);
15358@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15359 return -EINVAL;
15360 /* Trying to gain more privileges? */
15361 if (level > old) {
15362+#ifdef CONFIG_GRKERNSEC_IO
15363+ if (grsec_disable_privio) {
15364+ gr_handle_iopl();
15365+ return -EPERM;
15366+ }
15367+#endif
15368 if (!capable(CAP_SYS_RAWIO))
15369 return -EPERM;
15370 }
15371diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15372index 429e0c9..17b3ece 100644
15373--- a/arch/x86/kernel/irq.c
15374+++ b/arch/x86/kernel/irq.c
15375@@ -18,7 +18,7 @@
15376 #include <asm/mce.h>
15377 #include <asm/hw_irq.h>
15378
15379-atomic_t irq_err_count;
15380+atomic_unchecked_t irq_err_count;
15381
15382 /* Function pointer for generic interrupt vector handling */
15383 void (*x86_platform_ipi_callback)(void) = NULL;
15384@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15385 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15386 seq_printf(p, " Machine check polls\n");
15387 #endif
15388- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15389+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15390 #if defined(CONFIG_X86_IO_APIC)
15391- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15392+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15393 #endif
15394 return 0;
15395 }
15396@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15397
15398 u64 arch_irq_stat(void)
15399 {
15400- u64 sum = atomic_read(&irq_err_count);
15401+ u64 sum = atomic_read_unchecked(&irq_err_count);
15402
15403 #ifdef CONFIG_X86_IO_APIC
15404- sum += atomic_read(&irq_mis_count);
15405+ sum += atomic_read_unchecked(&irq_mis_count);
15406 #endif
15407 return sum;
15408 }
15409diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15410index 7209070..cbcd71a 100644
15411--- a/arch/x86/kernel/irq_32.c
15412+++ b/arch/x86/kernel/irq_32.c
15413@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15414 __asm__ __volatile__("andl %%esp,%0" :
15415 "=r" (sp) : "0" (THREAD_SIZE - 1));
15416
15417- return sp < (sizeof(struct thread_info) + STACK_WARN);
15418+ return sp < STACK_WARN;
15419 }
15420
15421 static void print_stack_overflow(void)
15422@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15423 * per-CPU IRQ handling contexts (thread information and stack)
15424 */
15425 union irq_ctx {
15426- struct thread_info tinfo;
15427- u32 stack[THREAD_SIZE/sizeof(u32)];
15428+ unsigned long previous_esp;
15429+ u32 stack[THREAD_SIZE/sizeof(u32)];
15430 } __attribute__((aligned(THREAD_SIZE)));
15431
15432 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15433@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15434 static inline int
15435 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15436 {
15437- union irq_ctx *curctx, *irqctx;
15438+ union irq_ctx *irqctx;
15439 u32 *isp, arg1, arg2;
15440
15441- curctx = (union irq_ctx *) current_thread_info();
15442 irqctx = __this_cpu_read(hardirq_ctx);
15443
15444 /*
15445@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15446 * handler) we can't do that and just have to keep using the
15447 * current stack (which is the irq stack already after all)
15448 */
15449- if (unlikely(curctx == irqctx))
15450+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15451 return 0;
15452
15453 /* build the stack frame on the IRQ stack */
15454- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15455- irqctx->tinfo.task = curctx->tinfo.task;
15456- irqctx->tinfo.previous_esp = current_stack_pointer;
15457+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15458+ irqctx->previous_esp = current_stack_pointer;
15459
15460- /*
15461- * Copy the softirq bits in preempt_count so that the
15462- * softirq checks work in the hardirq context.
15463- */
15464- irqctx->tinfo.preempt_count =
15465- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15466- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15467+#ifdef CONFIG_PAX_MEMORY_UDEREF
15468+ __set_fs(MAKE_MM_SEG(0));
15469+#endif
15470
15471 if (unlikely(overflow))
15472 call_on_stack(print_stack_overflow, isp);
15473@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15474 : "0" (irq), "1" (desc), "2" (isp),
15475 "D" (desc->handle_irq)
15476 : "memory", "cc", "ecx");
15477+
15478+#ifdef CONFIG_PAX_MEMORY_UDEREF
15479+ __set_fs(current_thread_info()->addr_limit);
15480+#endif
15481+
15482 return 1;
15483 }
15484
15485@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15486 */
15487 void __cpuinit irq_ctx_init(int cpu)
15488 {
15489- union irq_ctx *irqctx;
15490-
15491 if (per_cpu(hardirq_ctx, cpu))
15492 return;
15493
15494- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15495- THREAD_FLAGS,
15496- THREAD_ORDER));
15497- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15498- irqctx->tinfo.cpu = cpu;
15499- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15500- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15501-
15502- per_cpu(hardirq_ctx, cpu) = irqctx;
15503-
15504- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15505- THREAD_FLAGS,
15506- THREAD_ORDER));
15507- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15508- irqctx->tinfo.cpu = cpu;
15509- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15510-
15511- per_cpu(softirq_ctx, cpu) = irqctx;
15512+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15513+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15514
15515 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15516 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15517@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15518 asmlinkage void do_softirq(void)
15519 {
15520 unsigned long flags;
15521- struct thread_info *curctx;
15522 union irq_ctx *irqctx;
15523 u32 *isp;
15524
15525@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15526 local_irq_save(flags);
15527
15528 if (local_softirq_pending()) {
15529- curctx = current_thread_info();
15530 irqctx = __this_cpu_read(softirq_ctx);
15531- irqctx->tinfo.task = curctx->task;
15532- irqctx->tinfo.previous_esp = current_stack_pointer;
15533+ irqctx->previous_esp = current_stack_pointer;
15534
15535 /* build the stack frame on the softirq stack */
15536- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15537+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15538+
15539+#ifdef CONFIG_PAX_MEMORY_UDEREF
15540+ __set_fs(MAKE_MM_SEG(0));
15541+#endif
15542
15543 call_on_stack(__do_softirq, isp);
15544+
15545+#ifdef CONFIG_PAX_MEMORY_UDEREF
15546+ __set_fs(current_thread_info()->addr_limit);
15547+#endif
15548+
15549 /*
15550 * Shouldn't happen, we returned above if in_interrupt():
15551 */
15552diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15553index 69bca46..0bac999 100644
15554--- a/arch/x86/kernel/irq_64.c
15555+++ b/arch/x86/kernel/irq_64.c
15556@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15557 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15558 u64 curbase = (u64)task_stack_page(current);
15559
15560- if (user_mode_vm(regs))
15561+ if (user_mode(regs))
15562 return;
15563
15564 WARN_ONCE(regs->sp >= curbase &&
15565diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15566index faba577..93b9e71 100644
15567--- a/arch/x86/kernel/kgdb.c
15568+++ b/arch/x86/kernel/kgdb.c
15569@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15570 #ifdef CONFIG_X86_32
15571 switch (regno) {
15572 case GDB_SS:
15573- if (!user_mode_vm(regs))
15574+ if (!user_mode(regs))
15575 *(unsigned long *)mem = __KERNEL_DS;
15576 break;
15577 case GDB_SP:
15578- if (!user_mode_vm(regs))
15579+ if (!user_mode(regs))
15580 *(unsigned long *)mem = kernel_stack_pointer(regs);
15581 break;
15582 case GDB_GS:
15583@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15584 case 'k':
15585 /* clear the trace bit */
15586 linux_regs->flags &= ~X86_EFLAGS_TF;
15587- atomic_set(&kgdb_cpu_doing_single_step, -1);
15588+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15589
15590 /* set the trace bit if we're stepping */
15591 if (remcomInBuffer[0] == 's') {
15592 linux_regs->flags |= X86_EFLAGS_TF;
15593- atomic_set(&kgdb_cpu_doing_single_step,
15594+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15595 raw_smp_processor_id());
15596 }
15597
15598@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15599
15600 switch (cmd) {
15601 case DIE_DEBUG:
15602- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15603+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15604 if (user_mode(regs))
15605 return single_step_cont(regs, args);
15606 break;
15607diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15608index 7da647d..5d3c4c1 100644
15609--- a/arch/x86/kernel/kprobes.c
15610+++ b/arch/x86/kernel/kprobes.c
15611@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15612 } __attribute__((packed)) *insn;
15613
15614 insn = (struct __arch_relative_insn *)from;
15615+
15616+ pax_open_kernel();
15617 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15618 insn->op = op;
15619+ pax_close_kernel();
15620 }
15621
15622 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15623@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15624 kprobe_opcode_t opcode;
15625 kprobe_opcode_t *orig_opcodes = opcodes;
15626
15627- if (search_exception_tables((unsigned long)opcodes))
15628+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15629 return 0; /* Page fault may occur on this address. */
15630
15631 retry:
15632@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15633 }
15634 }
15635 insn_get_length(&insn);
15636+ pax_open_kernel();
15637 memcpy(dest, insn.kaddr, insn.length);
15638+ pax_close_kernel();
15639
15640 #ifdef CONFIG_X86_64
15641 if (insn_rip_relative(&insn)) {
15642@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15643 (u8 *) dest;
15644 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15645 disp = (u8 *) dest + insn_offset_displacement(&insn);
15646+ pax_open_kernel();
15647 *(s32 *) disp = (s32) newdisp;
15648+ pax_close_kernel();
15649 }
15650 #endif
15651 return insn.length;
15652@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15653 */
15654 __copy_instruction(p->ainsn.insn, p->addr, 0);
15655
15656- if (can_boost(p->addr))
15657+ if (can_boost(ktla_ktva(p->addr)))
15658 p->ainsn.boostable = 0;
15659 else
15660 p->ainsn.boostable = -1;
15661
15662- p->opcode = *p->addr;
15663+ p->opcode = *(ktla_ktva(p->addr));
15664 }
15665
15666 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15667@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15668 * nor set current_kprobe, because it doesn't use single
15669 * stepping.
15670 */
15671- regs->ip = (unsigned long)p->ainsn.insn;
15672+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15673 preempt_enable_no_resched();
15674 return;
15675 }
15676@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15677 if (p->opcode == BREAKPOINT_INSTRUCTION)
15678 regs->ip = (unsigned long)p->addr;
15679 else
15680- regs->ip = (unsigned long)p->ainsn.insn;
15681+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15682 }
15683
15684 /*
15685@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15686 setup_singlestep(p, regs, kcb, 0);
15687 return 1;
15688 }
15689- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15690+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15691 /*
15692 * The breakpoint instruction was removed right
15693 * after we hit it. Another cpu has removed
15694@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15695 " movq %rax, 152(%rsp)\n"
15696 RESTORE_REGS_STRING
15697 " popfq\n"
15698+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15699+ " btsq $63,(%rsp)\n"
15700+#endif
15701 #else
15702 " pushf\n"
15703 SAVE_REGS_STRING
15704@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15705 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15706 {
15707 unsigned long *tos = stack_addr(regs);
15708- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15709+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15710 unsigned long orig_ip = (unsigned long)p->addr;
15711 kprobe_opcode_t *insn = p->ainsn.insn;
15712
15713@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15714 struct die_args *args = data;
15715 int ret = NOTIFY_DONE;
15716
15717- if (args->regs && user_mode_vm(args->regs))
15718+ if (args->regs && user_mode(args->regs))
15719 return ret;
15720
15721 switch (val) {
15722@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15723 * Verify if the address gap is in 2GB range, because this uses
15724 * a relative jump.
15725 */
15726- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15727+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15728 if (abs(rel) > 0x7fffffff)
15729 return -ERANGE;
15730
15731@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15732 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15733
15734 /* Set probe function call */
15735- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15736+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15737
15738 /* Set returning jmp instruction at the tail of out-of-line buffer */
15739 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15740- (u8 *)op->kp.addr + op->optinsn.size);
15741+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15742
15743 flush_icache_range((unsigned long) buf,
15744 (unsigned long) buf + TMPL_END_IDX +
15745@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15746 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15747
15748 /* Backup instructions which will be replaced by jump address */
15749- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15750+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15751 RELATIVE_ADDR_SIZE);
15752
15753 insn_buf[0] = RELATIVEJUMP_OPCODE;
15754diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15755index a9c2116..a52d4fc 100644
15756--- a/arch/x86/kernel/kvm.c
15757+++ b/arch/x86/kernel/kvm.c
15758@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15759 pv_mmu_ops.set_pud = kvm_set_pud;
15760 #if PAGETABLE_LEVELS == 4
15761 pv_mmu_ops.set_pgd = kvm_set_pgd;
15762+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15763 #endif
15764 #endif
15765 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15766diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15767index ea69726..604d066 100644
15768--- a/arch/x86/kernel/ldt.c
15769+++ b/arch/x86/kernel/ldt.c
15770@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15771 if (reload) {
15772 #ifdef CONFIG_SMP
15773 preempt_disable();
15774- load_LDT(pc);
15775+ load_LDT_nolock(pc);
15776 if (!cpumask_equal(mm_cpumask(current->mm),
15777 cpumask_of(smp_processor_id())))
15778 smp_call_function(flush_ldt, current->mm, 1);
15779 preempt_enable();
15780 #else
15781- load_LDT(pc);
15782+ load_LDT_nolock(pc);
15783 #endif
15784 }
15785 if (oldsize) {
15786@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15787 return err;
15788
15789 for (i = 0; i < old->size; i++)
15790- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15791+ write_ldt_entry(new->ldt, i, old->ldt + i);
15792 return 0;
15793 }
15794
15795@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15796 retval = copy_ldt(&mm->context, &old_mm->context);
15797 mutex_unlock(&old_mm->context.lock);
15798 }
15799+
15800+ if (tsk == current) {
15801+ mm->context.vdso = 0;
15802+
15803+#ifdef CONFIG_X86_32
15804+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15805+ mm->context.user_cs_base = 0UL;
15806+ mm->context.user_cs_limit = ~0UL;
15807+
15808+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15809+ cpus_clear(mm->context.cpu_user_cs_mask);
15810+#endif
15811+
15812+#endif
15813+#endif
15814+
15815+ }
15816+
15817 return retval;
15818 }
15819
15820@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15821 }
15822 }
15823
15824+#ifdef CONFIG_PAX_SEGMEXEC
15825+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15826+ error = -EINVAL;
15827+ goto out_unlock;
15828+ }
15829+#endif
15830+
15831 fill_ldt(&ldt, &ldt_info);
15832 if (oldmode)
15833 ldt.avl = 0;
15834diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15835index a3fa43b..8966f4c 100644
15836--- a/arch/x86/kernel/machine_kexec_32.c
15837+++ b/arch/x86/kernel/machine_kexec_32.c
15838@@ -27,7 +27,7 @@
15839 #include <asm/cacheflush.h>
15840 #include <asm/debugreg.h>
15841
15842-static void set_idt(void *newidt, __u16 limit)
15843+static void set_idt(struct desc_struct *newidt, __u16 limit)
15844 {
15845 struct desc_ptr curidt;
15846
15847@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15848 }
15849
15850
15851-static void set_gdt(void *newgdt, __u16 limit)
15852+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15853 {
15854 struct desc_ptr curgdt;
15855
15856@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15857 }
15858
15859 control_page = page_address(image->control_code_page);
15860- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15861+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15862
15863 relocate_kernel_ptr = control_page;
15864 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15865diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15866index 3ca42d0..7cff8cc 100644
15867--- a/arch/x86/kernel/microcode_intel.c
15868+++ b/arch/x86/kernel/microcode_intel.c
15869@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15870
15871 static int get_ucode_user(void *to, const void *from, size_t n)
15872 {
15873- return copy_from_user(to, from, n);
15874+ return copy_from_user(to, (const void __force_user *)from, n);
15875 }
15876
15877 static enum ucode_state
15878 request_microcode_user(int cpu, const void __user *buf, size_t size)
15879 {
15880- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15881+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15882 }
15883
15884 static void microcode_fini_cpu(int cpu)
15885diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15886index 925179f..85bec6c 100644
15887--- a/arch/x86/kernel/module.c
15888+++ b/arch/x86/kernel/module.c
15889@@ -36,15 +36,60 @@
15890 #define DEBUGP(fmt...)
15891 #endif
15892
15893-void *module_alloc(unsigned long size)
15894+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15895 {
15896 if (PAGE_ALIGN(size) > MODULES_LEN)
15897 return NULL;
15898 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15899- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15900+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15901 -1, __builtin_return_address(0));
15902 }
15903
15904+void *module_alloc(unsigned long size)
15905+{
15906+
15907+#ifdef CONFIG_PAX_KERNEXEC
15908+ return __module_alloc(size, PAGE_KERNEL);
15909+#else
15910+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15911+#endif
15912+
15913+}
15914+
15915+#ifdef CONFIG_PAX_KERNEXEC
15916+#ifdef CONFIG_X86_32
15917+void *module_alloc_exec(unsigned long size)
15918+{
15919+ struct vm_struct *area;
15920+
15921+ if (size == 0)
15922+ return NULL;
15923+
15924+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15925+ return area ? area->addr : NULL;
15926+}
15927+EXPORT_SYMBOL(module_alloc_exec);
15928+
15929+void module_free_exec(struct module *mod, void *module_region)
15930+{
15931+ vunmap(module_region);
15932+}
15933+EXPORT_SYMBOL(module_free_exec);
15934+#else
15935+void module_free_exec(struct module *mod, void *module_region)
15936+{
15937+ module_free(mod, module_region);
15938+}
15939+EXPORT_SYMBOL(module_free_exec);
15940+
15941+void *module_alloc_exec(unsigned long size)
15942+{
15943+ return __module_alloc(size, PAGE_KERNEL_RX);
15944+}
15945+EXPORT_SYMBOL(module_alloc_exec);
15946+#endif
15947+#endif
15948+
15949 #ifdef CONFIG_X86_32
15950 int apply_relocate(Elf32_Shdr *sechdrs,
15951 const char *strtab,
15952@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15953 unsigned int i;
15954 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15955 Elf32_Sym *sym;
15956- uint32_t *location;
15957+ uint32_t *plocation, location;
15958
15959 DEBUGP("Applying relocate section %u to %u\n", relsec,
15960 sechdrs[relsec].sh_info);
15961 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15962 /* This is where to make the change */
15963- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15964- + rel[i].r_offset;
15965+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15966+ location = (uint32_t)plocation;
15967+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15968+ plocation = ktla_ktva((void *)plocation);
15969 /* This is the symbol it is referring to. Note that all
15970 undefined symbols have been resolved. */
15971 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15972@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15973 switch (ELF32_R_TYPE(rel[i].r_info)) {
15974 case R_386_32:
15975 /* We add the value into the location given */
15976- *location += sym->st_value;
15977+ pax_open_kernel();
15978+ *plocation += sym->st_value;
15979+ pax_close_kernel();
15980 break;
15981 case R_386_PC32:
15982 /* Add the value, subtract its postition */
15983- *location += sym->st_value - (uint32_t)location;
15984+ pax_open_kernel();
15985+ *plocation += sym->st_value - location;
15986+ pax_close_kernel();
15987 break;
15988 default:
15989 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15990@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
15991 case R_X86_64_NONE:
15992 break;
15993 case R_X86_64_64:
15994+ pax_open_kernel();
15995 *(u64 *)loc = val;
15996+ pax_close_kernel();
15997 break;
15998 case R_X86_64_32:
15999+ pax_open_kernel();
16000 *(u32 *)loc = val;
16001+ pax_close_kernel();
16002 if (val != *(u32 *)loc)
16003 goto overflow;
16004 break;
16005 case R_X86_64_32S:
16006+ pax_open_kernel();
16007 *(s32 *)loc = val;
16008+ pax_close_kernel();
16009 if ((s64)val != *(s32 *)loc)
16010 goto overflow;
16011 break;
16012 case R_X86_64_PC32:
16013 val -= (u64)loc;
16014+ pax_open_kernel();
16015 *(u32 *)loc = val;
16016+ pax_close_kernel();
16017+
16018 #if 0
16019 if ((s64)val != *(s32 *)loc)
16020 goto overflow;
16021diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16022index e88f37b..1353db6 100644
16023--- a/arch/x86/kernel/nmi.c
16024+++ b/arch/x86/kernel/nmi.c
16025@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16026 dotraplinkage notrace __kprobes void
16027 do_nmi(struct pt_regs *regs, long error_code)
16028 {
16029+
16030+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16031+ if (!user_mode(regs)) {
16032+ unsigned long cs = regs->cs & 0xFFFF;
16033+ unsigned long ip = ktva_ktla(regs->ip);
16034+
16035+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16036+ regs->ip = ip;
16037+ }
16038+#endif
16039+
16040 nmi_enter();
16041
16042 inc_irq_stat(__nmi_count);
16043diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16044index 676b8c7..870ba04 100644
16045--- a/arch/x86/kernel/paravirt-spinlocks.c
16046+++ b/arch/x86/kernel/paravirt-spinlocks.c
16047@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16048 arch_spin_lock(lock);
16049 }
16050
16051-struct pv_lock_ops pv_lock_ops = {
16052+struct pv_lock_ops pv_lock_ops __read_only = {
16053 #ifdef CONFIG_SMP
16054 .spin_is_locked = __ticket_spin_is_locked,
16055 .spin_is_contended = __ticket_spin_is_contended,
16056diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16057index d90272e..6bb013b 100644
16058--- a/arch/x86/kernel/paravirt.c
16059+++ b/arch/x86/kernel/paravirt.c
16060@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16061 {
16062 return x;
16063 }
16064+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16065+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16066+#endif
16067
16068 void __init default_banner(void)
16069 {
16070@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16071 if (opfunc == NULL)
16072 /* If there's no function, patch it with a ud2a (BUG) */
16073 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16074- else if (opfunc == _paravirt_nop)
16075+ else if (opfunc == (void *)_paravirt_nop)
16076 /* If the operation is a nop, then nop the callsite */
16077 ret = paravirt_patch_nop();
16078
16079 /* identity functions just return their single argument */
16080- else if (opfunc == _paravirt_ident_32)
16081+ else if (opfunc == (void *)_paravirt_ident_32)
16082 ret = paravirt_patch_ident_32(insnbuf, len);
16083- else if (opfunc == _paravirt_ident_64)
16084+ else if (opfunc == (void *)_paravirt_ident_64)
16085 ret = paravirt_patch_ident_64(insnbuf, len);
16086+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16087+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16088+ ret = paravirt_patch_ident_64(insnbuf, len);
16089+#endif
16090
16091 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16092 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16093@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16094 if (insn_len > len || start == NULL)
16095 insn_len = len;
16096 else
16097- memcpy(insnbuf, start, insn_len);
16098+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16099
16100 return insn_len;
16101 }
16102@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16103 preempt_enable();
16104 }
16105
16106-struct pv_info pv_info = {
16107+struct pv_info pv_info __read_only = {
16108 .name = "bare hardware",
16109 .paravirt_enabled = 0,
16110 .kernel_rpl = 0,
16111@@ -313,16 +320,16 @@ struct pv_info pv_info = {
16112 #endif
16113 };
16114
16115-struct pv_init_ops pv_init_ops = {
16116+struct pv_init_ops pv_init_ops __read_only = {
16117 .patch = native_patch,
16118 };
16119
16120-struct pv_time_ops pv_time_ops = {
16121+struct pv_time_ops pv_time_ops __read_only = {
16122 .sched_clock = native_sched_clock,
16123 .steal_clock = native_steal_clock,
16124 };
16125
16126-struct pv_irq_ops pv_irq_ops = {
16127+struct pv_irq_ops pv_irq_ops __read_only = {
16128 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16129 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16130 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16131@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16132 #endif
16133 };
16134
16135-struct pv_cpu_ops pv_cpu_ops = {
16136+struct pv_cpu_ops pv_cpu_ops __read_only = {
16137 .cpuid = native_cpuid,
16138 .get_debugreg = native_get_debugreg,
16139 .set_debugreg = native_set_debugreg,
16140@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16141 .end_context_switch = paravirt_nop,
16142 };
16143
16144-struct pv_apic_ops pv_apic_ops = {
16145+struct pv_apic_ops pv_apic_ops __read_only = {
16146 #ifdef CONFIG_X86_LOCAL_APIC
16147 .startup_ipi_hook = paravirt_nop,
16148 #endif
16149 };
16150
16151-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16152+#ifdef CONFIG_X86_32
16153+#ifdef CONFIG_X86_PAE
16154+/* 64-bit pagetable entries */
16155+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16156+#else
16157 /* 32-bit pagetable entries */
16158 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16159+#endif
16160 #else
16161 /* 64-bit pagetable entries */
16162 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16163 #endif
16164
16165-struct pv_mmu_ops pv_mmu_ops = {
16166+struct pv_mmu_ops pv_mmu_ops __read_only = {
16167
16168 .read_cr2 = native_read_cr2,
16169 .write_cr2 = native_write_cr2,
16170@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16171 .make_pud = PTE_IDENT,
16172
16173 .set_pgd = native_set_pgd,
16174+ .set_pgd_batched = native_set_pgd_batched,
16175 #endif
16176 #endif /* PAGETABLE_LEVELS >= 3 */
16177
16178@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16179 },
16180
16181 .set_fixmap = native_set_fixmap,
16182+
16183+#ifdef CONFIG_PAX_KERNEXEC
16184+ .pax_open_kernel = native_pax_open_kernel,
16185+ .pax_close_kernel = native_pax_close_kernel,
16186+#endif
16187+
16188 };
16189
16190 EXPORT_SYMBOL_GPL(pv_time_ops);
16191diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16192index 35ccf75..7a15747 100644
16193--- a/arch/x86/kernel/pci-iommu_table.c
16194+++ b/arch/x86/kernel/pci-iommu_table.c
16195@@ -2,7 +2,7 @@
16196 #include <asm/iommu_table.h>
16197 #include <linux/string.h>
16198 #include <linux/kallsyms.h>
16199-
16200+#include <linux/sched.h>
16201
16202 #define DEBUG 1
16203
16204diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16205index ee5d4fb..426649b 100644
16206--- a/arch/x86/kernel/process.c
16207+++ b/arch/x86/kernel/process.c
16208@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16209
16210 void free_thread_info(struct thread_info *ti)
16211 {
16212- free_thread_xstate(ti->task);
16213 free_pages((unsigned long)ti, THREAD_ORDER);
16214 }
16215
16216+static struct kmem_cache *task_struct_cachep;
16217+
16218 void arch_task_cache_init(void)
16219 {
16220- task_xstate_cachep =
16221- kmem_cache_create("task_xstate", xstate_size,
16222+ /* create a slab on which task_structs can be allocated */
16223+ task_struct_cachep =
16224+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16225+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16226+
16227+ task_xstate_cachep =
16228+ kmem_cache_create("task_xstate", xstate_size,
16229 __alignof__(union thread_xstate),
16230- SLAB_PANIC | SLAB_NOTRACK, NULL);
16231+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16232+}
16233+
16234+struct task_struct *alloc_task_struct_node(int node)
16235+{
16236+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16237+}
16238+
16239+void free_task_struct(struct task_struct *task)
16240+{
16241+ free_thread_xstate(task);
16242+ kmem_cache_free(task_struct_cachep, task);
16243 }
16244
16245 /*
16246@@ -70,7 +87,7 @@ void exit_thread(void)
16247 unsigned long *bp = t->io_bitmap_ptr;
16248
16249 if (bp) {
16250- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16251+ struct tss_struct *tss = init_tss + get_cpu();
16252
16253 t->io_bitmap_ptr = NULL;
16254 clear_thread_flag(TIF_IO_BITMAP);
16255@@ -106,7 +123,7 @@ void show_regs_common(void)
16256
16257 printk(KERN_CONT "\n");
16258 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16259- current->pid, current->comm, print_tainted(),
16260+ task_pid_nr(current), current->comm, print_tainted(),
16261 init_utsname()->release,
16262 (int)strcspn(init_utsname()->version, " "),
16263 init_utsname()->version);
16264@@ -120,6 +137,9 @@ void flush_thread(void)
16265 {
16266 struct task_struct *tsk = current;
16267
16268+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16269+ loadsegment(gs, 0);
16270+#endif
16271 flush_ptrace_hw_breakpoint(tsk);
16272 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16273 /*
16274@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16275 regs.di = (unsigned long) arg;
16276
16277 #ifdef CONFIG_X86_32
16278- regs.ds = __USER_DS;
16279- regs.es = __USER_DS;
16280+ regs.ds = __KERNEL_DS;
16281+ regs.es = __KERNEL_DS;
16282 regs.fs = __KERNEL_PERCPU;
16283- regs.gs = __KERNEL_STACK_CANARY;
16284+ savesegment(gs, regs.gs);
16285 #else
16286 regs.ss = __KERNEL_DS;
16287 #endif
16288@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16289
16290 return ret;
16291 }
16292-void stop_this_cpu(void *dummy)
16293+__noreturn void stop_this_cpu(void *dummy)
16294 {
16295 local_irq_disable();
16296 /*
16297@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16298 }
16299 early_param("idle", idle_setup);
16300
16301-unsigned long arch_align_stack(unsigned long sp)
16302+#ifdef CONFIG_PAX_RANDKSTACK
16303+void pax_randomize_kstack(struct pt_regs *regs)
16304 {
16305- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16306- sp -= get_random_int() % 8192;
16307- return sp & ~0xf;
16308-}
16309+ struct thread_struct *thread = &current->thread;
16310+ unsigned long time;
16311
16312-unsigned long arch_randomize_brk(struct mm_struct *mm)
16313-{
16314- unsigned long range_end = mm->brk + 0x02000000;
16315- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16316-}
16317+ if (!randomize_va_space)
16318+ return;
16319+
16320+ if (v8086_mode(regs))
16321+ return;
16322
16323+ rdtscl(time);
16324+
16325+ /* P4 seems to return a 0 LSB, ignore it */
16326+#ifdef CONFIG_MPENTIUM4
16327+ time &= 0x3EUL;
16328+ time <<= 2;
16329+#elif defined(CONFIG_X86_64)
16330+ time &= 0xFUL;
16331+ time <<= 4;
16332+#else
16333+ time &= 0x1FUL;
16334+ time <<= 3;
16335+#endif
16336+
16337+ thread->sp0 ^= time;
16338+ load_sp0(init_tss + smp_processor_id(), thread);
16339+
16340+#ifdef CONFIG_X86_64
16341+ percpu_write(kernel_stack, thread->sp0);
16342+#endif
16343+}
16344+#endif
16345diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16346index 795b79f..063767a 100644
16347--- a/arch/x86/kernel/process_32.c
16348+++ b/arch/x86/kernel/process_32.c
16349@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16350 unsigned long thread_saved_pc(struct task_struct *tsk)
16351 {
16352 return ((unsigned long *)tsk->thread.sp)[3];
16353+//XXX return tsk->thread.eip;
16354 }
16355
16356 #ifndef CONFIG_SMP
16357@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16358 unsigned long sp;
16359 unsigned short ss, gs;
16360
16361- if (user_mode_vm(regs)) {
16362+ if (user_mode(regs)) {
16363 sp = regs->sp;
16364 ss = regs->ss & 0xffff;
16365- gs = get_user_gs(regs);
16366 } else {
16367 sp = kernel_stack_pointer(regs);
16368 savesegment(ss, ss);
16369- savesegment(gs, gs);
16370 }
16371+ gs = get_user_gs(regs);
16372
16373 show_regs_common();
16374
16375@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16376 struct task_struct *tsk;
16377 int err;
16378
16379- childregs = task_pt_regs(p);
16380+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16381 *childregs = *regs;
16382 childregs->ax = 0;
16383 childregs->sp = sp;
16384
16385 p->thread.sp = (unsigned long) childregs;
16386 p->thread.sp0 = (unsigned long) (childregs+1);
16387+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16388
16389 p->thread.ip = (unsigned long) ret_from_fork;
16390
16391@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16392 struct thread_struct *prev = &prev_p->thread,
16393 *next = &next_p->thread;
16394 int cpu = smp_processor_id();
16395- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16396+ struct tss_struct *tss = init_tss + cpu;
16397 bool preload_fpu;
16398
16399 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16400@@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16401 */
16402 lazy_save_gs(prev->gs);
16403
16404+#ifdef CONFIG_PAX_MEMORY_UDEREF
16405+ __set_fs(task_thread_info(next_p)->addr_limit);
16406+#endif
16407+
16408 /*
16409 * Load the per-thread Thread-Local Storage descriptor.
16410 */
16411@@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16412 */
16413 arch_end_context_switch(next_p);
16414
16415+ percpu_write(current_task, next_p);
16416+ percpu_write(current_tinfo, &next_p->tinfo);
16417+
16418 if (preload_fpu)
16419 __math_state_restore();
16420
16421@@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16422 if (prev->gs | next->gs)
16423 lazy_load_gs(next->gs);
16424
16425- percpu_write(current_task, next_p);
16426-
16427 return prev_p;
16428 }
16429
16430@@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16431 } while (count++ < 16);
16432 return 0;
16433 }
16434-
16435diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16436index 3bd7e6e..90b2bcf 100644
16437--- a/arch/x86/kernel/process_64.c
16438+++ b/arch/x86/kernel/process_64.c
16439@@ -89,7 +89,7 @@ static void __exit_idle(void)
16440 void exit_idle(void)
16441 {
16442 /* idle loop has pid 0 */
16443- if (current->pid)
16444+ if (task_pid_nr(current))
16445 return;
16446 __exit_idle();
16447 }
16448@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16449 struct pt_regs *childregs;
16450 struct task_struct *me = current;
16451
16452- childregs = ((struct pt_regs *)
16453- (THREAD_SIZE + task_stack_page(p))) - 1;
16454+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16455 *childregs = *regs;
16456
16457 childregs->ax = 0;
16458@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16459 p->thread.sp = (unsigned long) childregs;
16460 p->thread.sp0 = (unsigned long) (childregs+1);
16461 p->thread.usersp = me->thread.usersp;
16462+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16463
16464 set_tsk_thread_flag(p, TIF_FORK);
16465
16466@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16467 struct thread_struct *prev = &prev_p->thread;
16468 struct thread_struct *next = &next_p->thread;
16469 int cpu = smp_processor_id();
16470- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16471+ struct tss_struct *tss = init_tss + cpu;
16472 unsigned fsindex, gsindex;
16473 bool preload_fpu;
16474
16475@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16476 prev->usersp = percpu_read(old_rsp);
16477 percpu_write(old_rsp, next->usersp);
16478 percpu_write(current_task, next_p);
16479+ percpu_write(current_tinfo, &next_p->tinfo);
16480
16481- percpu_write(kernel_stack,
16482- (unsigned long)task_stack_page(next_p) +
16483- THREAD_SIZE - KERNEL_STACK_OFFSET);
16484+ percpu_write(kernel_stack, next->sp0);
16485
16486 /*
16487 * Now maybe reload the debug registers and handle I/O bitmaps
16488@@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16489 if (!p || p == current || p->state == TASK_RUNNING)
16490 return 0;
16491 stack = (unsigned long)task_stack_page(p);
16492- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16493+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16494 return 0;
16495 fp = *(u64 *)(p->thread.sp);
16496 do {
16497- if (fp < (unsigned long)stack ||
16498- fp >= (unsigned long)stack+THREAD_SIZE)
16499+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16500 return 0;
16501 ip = *(u64 *)(fp+8);
16502 if (!in_sched_functions(ip))
16503diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16504index 8252879..d3219e0 100644
16505--- a/arch/x86/kernel/ptrace.c
16506+++ b/arch/x86/kernel/ptrace.c
16507@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16508 unsigned long addr, unsigned long data)
16509 {
16510 int ret;
16511- unsigned long __user *datap = (unsigned long __user *)data;
16512+ unsigned long __user *datap = (__force unsigned long __user *)data;
16513
16514 switch (request) {
16515 /* read the word at location addr in the USER area. */
16516@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16517 if ((int) addr < 0)
16518 return -EIO;
16519 ret = do_get_thread_area(child, addr,
16520- (struct user_desc __user *)data);
16521+ (__force struct user_desc __user *) data);
16522 break;
16523
16524 case PTRACE_SET_THREAD_AREA:
16525 if ((int) addr < 0)
16526 return -EIO;
16527 ret = do_set_thread_area(child, addr,
16528- (struct user_desc __user *)data, 0);
16529+ (__force struct user_desc __user *) data, 0);
16530 break;
16531 #endif
16532
16533@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16534 memset(info, 0, sizeof(*info));
16535 info->si_signo = SIGTRAP;
16536 info->si_code = si_code;
16537- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16538+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16539 }
16540
16541 void user_single_step_siginfo(struct task_struct *tsk,
16542diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16543index 42eb330..139955c 100644
16544--- a/arch/x86/kernel/pvclock.c
16545+++ b/arch/x86/kernel/pvclock.c
16546@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16547 return pv_tsc_khz;
16548 }
16549
16550-static atomic64_t last_value = ATOMIC64_INIT(0);
16551+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16552
16553 void pvclock_resume(void)
16554 {
16555- atomic64_set(&last_value, 0);
16556+ atomic64_set_unchecked(&last_value, 0);
16557 }
16558
16559 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16560@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16561 * updating at the same time, and one of them could be slightly behind,
16562 * making the assumption that last_value always go forward fail to hold.
16563 */
16564- last = atomic64_read(&last_value);
16565+ last = atomic64_read_unchecked(&last_value);
16566 do {
16567 if (ret < last)
16568 return last;
16569- last = atomic64_cmpxchg(&last_value, last, ret);
16570+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16571 } while (unlikely(last != ret));
16572
16573 return ret;
16574diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16575index 37a458b..e63d183 100644
16576--- a/arch/x86/kernel/reboot.c
16577+++ b/arch/x86/kernel/reboot.c
16578@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16579 EXPORT_SYMBOL(pm_power_off);
16580
16581 static const struct desc_ptr no_idt = {};
16582-static int reboot_mode;
16583+static unsigned short reboot_mode;
16584 enum reboot_type reboot_type = BOOT_ACPI;
16585 int reboot_force;
16586
16587@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16588 extern const unsigned char machine_real_restart_asm[];
16589 extern const u64 machine_real_restart_gdt[3];
16590
16591-void machine_real_restart(unsigned int type)
16592+__noreturn void machine_real_restart(unsigned int type)
16593 {
16594 void *restart_va;
16595 unsigned long restart_pa;
16596- void (*restart_lowmem)(unsigned int);
16597+ void (* __noreturn restart_lowmem)(unsigned int);
16598 u64 *lowmem_gdt;
16599
16600+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16601+ struct desc_struct *gdt;
16602+#endif
16603+
16604 local_irq_disable();
16605
16606 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16607@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16608 boot)". This seems like a fairly standard thing that gets set by
16609 REBOOT.COM programs, and the previous reset routine did this
16610 too. */
16611- *((unsigned short *)0x472) = reboot_mode;
16612+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16613
16614 /* Patch the GDT in the low memory trampoline */
16615 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16616
16617 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16618 restart_pa = virt_to_phys(restart_va);
16619- restart_lowmem = (void (*)(unsigned int))restart_pa;
16620+ restart_lowmem = (void *)restart_pa;
16621
16622 /* GDT[0]: GDT self-pointer */
16623 lowmem_gdt[0] =
16624@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16625 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16626
16627 /* Jump to the identity-mapped low memory code */
16628+
16629+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16630+ gdt = get_cpu_gdt_table(smp_processor_id());
16631+ pax_open_kernel();
16632+#ifdef CONFIG_PAX_MEMORY_UDEREF
16633+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16634+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16635+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16636+#endif
16637+#ifdef CONFIG_PAX_KERNEXEC
16638+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16639+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16640+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16641+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16642+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16643+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16644+#endif
16645+ pax_close_kernel();
16646+#endif
16647+
16648+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16649+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16650+ unreachable();
16651+#else
16652 restart_lowmem(type);
16653+#endif
16654+
16655 }
16656 #ifdef CONFIG_APM_MODULE
16657 EXPORT_SYMBOL(machine_real_restart);
16658@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16659 * try to force a triple fault and then cycle between hitting the keyboard
16660 * controller and doing that
16661 */
16662-static void native_machine_emergency_restart(void)
16663+__noreturn static void native_machine_emergency_restart(void)
16664 {
16665 int i;
16666 int attempt = 0;
16667@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16668 #endif
16669 }
16670
16671-static void __machine_emergency_restart(int emergency)
16672+static __noreturn void __machine_emergency_restart(int emergency)
16673 {
16674 reboot_emergency = emergency;
16675 machine_ops.emergency_restart();
16676 }
16677
16678-static void native_machine_restart(char *__unused)
16679+static __noreturn void native_machine_restart(char *__unused)
16680 {
16681 printk("machine restart\n");
16682
16683@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16684 __machine_emergency_restart(0);
16685 }
16686
16687-static void native_machine_halt(void)
16688+static __noreturn void native_machine_halt(void)
16689 {
16690 /* stop other cpus and apics */
16691 machine_shutdown();
16692@@ -690,7 +720,7 @@ static void native_machine_halt(void)
16693 stop_this_cpu(NULL);
16694 }
16695
16696-static void native_machine_power_off(void)
16697+__noreturn static void native_machine_power_off(void)
16698 {
16699 if (pm_power_off) {
16700 if (!reboot_force)
16701@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16702 }
16703 /* a fallback in case there is no PM info available */
16704 tboot_shutdown(TB_SHUTDOWN_HALT);
16705+ unreachable();
16706 }
16707
16708 struct machine_ops machine_ops = {
16709diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16710index 7a6f3b3..bed145d7 100644
16711--- a/arch/x86/kernel/relocate_kernel_64.S
16712+++ b/arch/x86/kernel/relocate_kernel_64.S
16713@@ -11,6 +11,7 @@
16714 #include <asm/kexec.h>
16715 #include <asm/processor-flags.h>
16716 #include <asm/pgtable_types.h>
16717+#include <asm/alternative-asm.h>
16718
16719 /*
16720 * Must be relocatable PIC code callable as a C function
16721@@ -160,13 +161,14 @@ identity_mapped:
16722 xorq %rbp, %rbp
16723 xorq %r8, %r8
16724 xorq %r9, %r9
16725- xorq %r10, %r9
16726+ xorq %r10, %r10
16727 xorq %r11, %r11
16728 xorq %r12, %r12
16729 xorq %r13, %r13
16730 xorq %r14, %r14
16731 xorq %r15, %r15
16732
16733+ pax_force_retaddr 0, 1
16734 ret
16735
16736 1:
16737diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16738index cf0ef98..e3f780b 100644
16739--- a/arch/x86/kernel/setup.c
16740+++ b/arch/x86/kernel/setup.c
16741@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16742
16743 switch (data->type) {
16744 case SETUP_E820_EXT:
16745- parse_e820_ext(data);
16746+ parse_e820_ext((struct setup_data __force_kernel *)data);
16747 break;
16748 case SETUP_DTB:
16749 add_dtb(pa_data);
16750@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16751 * area (640->1Mb) as ram even though it is not.
16752 * take them out.
16753 */
16754- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16755+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16756 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16757 }
16758
16759@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16760
16761 if (!boot_params.hdr.root_flags)
16762 root_mountflags &= ~MS_RDONLY;
16763- init_mm.start_code = (unsigned long) _text;
16764- init_mm.end_code = (unsigned long) _etext;
16765+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16766+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16767 init_mm.end_data = (unsigned long) _edata;
16768 init_mm.brk = _brk_end;
16769
16770- code_resource.start = virt_to_phys(_text);
16771- code_resource.end = virt_to_phys(_etext)-1;
16772- data_resource.start = virt_to_phys(_etext);
16773+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16774+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16775+ data_resource.start = virt_to_phys(_sdata);
16776 data_resource.end = virt_to_phys(_edata)-1;
16777 bss_resource.start = virt_to_phys(&__bss_start);
16778 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16779diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16780index 71f4727..16dc9f7 100644
16781--- a/arch/x86/kernel/setup_percpu.c
16782+++ b/arch/x86/kernel/setup_percpu.c
16783@@ -21,19 +21,17 @@
16784 #include <asm/cpu.h>
16785 #include <asm/stackprotector.h>
16786
16787-DEFINE_PER_CPU(int, cpu_number);
16788+#ifdef CONFIG_SMP
16789+DEFINE_PER_CPU(unsigned int, cpu_number);
16790 EXPORT_PER_CPU_SYMBOL(cpu_number);
16791+#endif
16792
16793-#ifdef CONFIG_X86_64
16794 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16795-#else
16796-#define BOOT_PERCPU_OFFSET 0
16797-#endif
16798
16799 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16800 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16801
16802-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16803+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16804 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16805 };
16806 EXPORT_SYMBOL(__per_cpu_offset);
16807@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16808 {
16809 #ifdef CONFIG_X86_32
16810 struct desc_struct gdt;
16811+ unsigned long base = per_cpu_offset(cpu);
16812
16813- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16814- 0x2 | DESCTYPE_S, 0x8);
16815- gdt.s = 1;
16816+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16817+ 0x83 | DESCTYPE_S, 0xC);
16818 write_gdt_entry(get_cpu_gdt_table(cpu),
16819 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16820 #endif
16821@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16822 /* alrighty, percpu areas up and running */
16823 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16824 for_each_possible_cpu(cpu) {
16825+#ifdef CONFIG_CC_STACKPROTECTOR
16826+#ifdef CONFIG_X86_32
16827+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16828+#endif
16829+#endif
16830 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16831 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16832 per_cpu(cpu_number, cpu) = cpu;
16833@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16834 */
16835 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16836 #endif
16837+#ifdef CONFIG_CC_STACKPROTECTOR
16838+#ifdef CONFIG_X86_32
16839+ if (!cpu)
16840+ per_cpu(stack_canary.canary, cpu) = canary;
16841+#endif
16842+#endif
16843 /*
16844 * Up to this point, the boot CPU has been using .init.data
16845 * area. Reload any changed state for the boot CPU.
16846diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16847index 54ddaeb2..22c3bdc 100644
16848--- a/arch/x86/kernel/signal.c
16849+++ b/arch/x86/kernel/signal.c
16850@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16851 * Align the stack pointer according to the i386 ABI,
16852 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16853 */
16854- sp = ((sp + 4) & -16ul) - 4;
16855+ sp = ((sp - 12) & -16ul) - 4;
16856 #else /* !CONFIG_X86_32 */
16857 sp = round_down(sp, 16) - 8;
16858 #endif
16859@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16860 * Return an always-bogus address instead so we will die with SIGSEGV.
16861 */
16862 if (onsigstack && !likely(on_sig_stack(sp)))
16863- return (void __user *)-1L;
16864+ return (__force void __user *)-1L;
16865
16866 /* save i387 state */
16867 if (used_math() && save_i387_xstate(*fpstate) < 0)
16868- return (void __user *)-1L;
16869+ return (__force void __user *)-1L;
16870
16871 return (void __user *)sp;
16872 }
16873@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16874 }
16875
16876 if (current->mm->context.vdso)
16877- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16878+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16879 else
16880- restorer = &frame->retcode;
16881+ restorer = (void __user *)&frame->retcode;
16882 if (ka->sa.sa_flags & SA_RESTORER)
16883 restorer = ka->sa.sa_restorer;
16884
16885@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16886 * reasons and because gdb uses it as a signature to notice
16887 * signal handler stack frames.
16888 */
16889- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16890+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16891
16892 if (err)
16893 return -EFAULT;
16894@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16895 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16896
16897 /* Set up to return from userspace. */
16898- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16899+ if (current->mm->context.vdso)
16900+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16901+ else
16902+ restorer = (void __user *)&frame->retcode;
16903 if (ka->sa.sa_flags & SA_RESTORER)
16904 restorer = ka->sa.sa_restorer;
16905 put_user_ex(restorer, &frame->pretcode);
16906@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16907 * reasons and because gdb uses it as a signature to notice
16908 * signal handler stack frames.
16909 */
16910- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16911+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16912 } put_user_catch(err);
16913
16914 if (err)
16915@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16916 * X86_32: vm86 regs switched out by assembly code before reaching
16917 * here, so testing against kernel CS suffices.
16918 */
16919- if (!user_mode(regs))
16920+ if (!user_mode_novm(regs))
16921 return;
16922
16923 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16924diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16925index 9f548cb..caf76f7 100644
16926--- a/arch/x86/kernel/smpboot.c
16927+++ b/arch/x86/kernel/smpboot.c
16928@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16929 set_idle_for_cpu(cpu, c_idle.idle);
16930 do_rest:
16931 per_cpu(current_task, cpu) = c_idle.idle;
16932+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16933 #ifdef CONFIG_X86_32
16934 /* Stack for startup_32 can be just as for start_secondary onwards */
16935 irq_ctx_init(cpu);
16936 #else
16937 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16938 initial_gs = per_cpu_offset(cpu);
16939- per_cpu(kernel_stack, cpu) =
16940- (unsigned long)task_stack_page(c_idle.idle) -
16941- KERNEL_STACK_OFFSET + THREAD_SIZE;
16942+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16943 #endif
16944+
16945+ pax_open_kernel();
16946 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16947+ pax_close_kernel();
16948+
16949 initial_code = (unsigned long)start_secondary;
16950 stack_start = c_idle.idle->thread.sp;
16951
16952@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16953
16954 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16955
16956+#ifdef CONFIG_PAX_PER_CPU_PGD
16957+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16958+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16959+ KERNEL_PGD_PTRS);
16960+#endif
16961+
16962 err = do_boot_cpu(apicid, cpu);
16963 if (err) {
16964 pr_debug("do_boot_cpu failed %d\n", err);
16965diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16966index c346d11..d43b163 100644
16967--- a/arch/x86/kernel/step.c
16968+++ b/arch/x86/kernel/step.c
16969@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16970 struct desc_struct *desc;
16971 unsigned long base;
16972
16973- seg &= ~7UL;
16974+ seg >>= 3;
16975
16976 mutex_lock(&child->mm->context.lock);
16977- if (unlikely((seg >> 3) >= child->mm->context.size))
16978+ if (unlikely(seg >= child->mm->context.size))
16979 addr = -1L; /* bogus selector, access would fault */
16980 else {
16981 desc = child->mm->context.ldt + seg;
16982@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16983 addr += base;
16984 }
16985 mutex_unlock(&child->mm->context.lock);
16986- }
16987+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16988+ addr = ktla_ktva(addr);
16989
16990 return addr;
16991 }
16992@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
16993 unsigned char opcode[15];
16994 unsigned long addr = convert_ip_to_linear(child, regs);
16995
16996+ if (addr == -EINVAL)
16997+ return 0;
16998+
16999 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17000 for (i = 0; i < copied; i++) {
17001 switch (opcode[i]) {
17002diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17003index 0b0cb5f..db6b9ed 100644
17004--- a/arch/x86/kernel/sys_i386_32.c
17005+++ b/arch/x86/kernel/sys_i386_32.c
17006@@ -24,17 +24,224 @@
17007
17008 #include <asm/syscalls.h>
17009
17010-/*
17011- * Do a system call from kernel instead of calling sys_execve so we
17012- * end up with proper pt_regs.
17013- */
17014-int kernel_execve(const char *filename,
17015- const char *const argv[],
17016- const char *const envp[])
17017+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17018 {
17019- long __res;
17020- asm volatile ("int $0x80"
17021- : "=a" (__res)
17022- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17023- return __res;
17024+ unsigned long pax_task_size = TASK_SIZE;
17025+
17026+#ifdef CONFIG_PAX_SEGMEXEC
17027+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17028+ pax_task_size = SEGMEXEC_TASK_SIZE;
17029+#endif
17030+
17031+ if (len > pax_task_size || addr > pax_task_size - len)
17032+ return -EINVAL;
17033+
17034+ return 0;
17035+}
17036+
17037+unsigned long
17038+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17039+ unsigned long len, unsigned long pgoff, unsigned long flags)
17040+{
17041+ struct mm_struct *mm = current->mm;
17042+ struct vm_area_struct *vma;
17043+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17044+
17045+#ifdef CONFIG_PAX_SEGMEXEC
17046+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17047+ pax_task_size = SEGMEXEC_TASK_SIZE;
17048+#endif
17049+
17050+ pax_task_size -= PAGE_SIZE;
17051+
17052+ if (len > pax_task_size)
17053+ return -ENOMEM;
17054+
17055+ if (flags & MAP_FIXED)
17056+ return addr;
17057+
17058+#ifdef CONFIG_PAX_RANDMMAP
17059+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17060+#endif
17061+
17062+ if (addr) {
17063+ addr = PAGE_ALIGN(addr);
17064+ if (pax_task_size - len >= addr) {
17065+ vma = find_vma(mm, addr);
17066+ if (check_heap_stack_gap(vma, addr, len))
17067+ return addr;
17068+ }
17069+ }
17070+ if (len > mm->cached_hole_size) {
17071+ start_addr = addr = mm->free_area_cache;
17072+ } else {
17073+ start_addr = addr = mm->mmap_base;
17074+ mm->cached_hole_size = 0;
17075+ }
17076+
17077+#ifdef CONFIG_PAX_PAGEEXEC
17078+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17079+ start_addr = 0x00110000UL;
17080+
17081+#ifdef CONFIG_PAX_RANDMMAP
17082+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17083+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17084+#endif
17085+
17086+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17087+ start_addr = addr = mm->mmap_base;
17088+ else
17089+ addr = start_addr;
17090+ }
17091+#endif
17092+
17093+full_search:
17094+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17095+ /* At this point: (!vma || addr < vma->vm_end). */
17096+ if (pax_task_size - len < addr) {
17097+ /*
17098+ * Start a new search - just in case we missed
17099+ * some holes.
17100+ */
17101+ if (start_addr != mm->mmap_base) {
17102+ start_addr = addr = mm->mmap_base;
17103+ mm->cached_hole_size = 0;
17104+ goto full_search;
17105+ }
17106+ return -ENOMEM;
17107+ }
17108+ if (check_heap_stack_gap(vma, addr, len))
17109+ break;
17110+ if (addr + mm->cached_hole_size < vma->vm_start)
17111+ mm->cached_hole_size = vma->vm_start - addr;
17112+ addr = vma->vm_end;
17113+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17114+ start_addr = addr = mm->mmap_base;
17115+ mm->cached_hole_size = 0;
17116+ goto full_search;
17117+ }
17118+ }
17119+
17120+ /*
17121+ * Remember the place where we stopped the search:
17122+ */
17123+ mm->free_area_cache = addr + len;
17124+ return addr;
17125+}
17126+
17127+unsigned long
17128+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17129+ const unsigned long len, const unsigned long pgoff,
17130+ const unsigned long flags)
17131+{
17132+ struct vm_area_struct *vma;
17133+ struct mm_struct *mm = current->mm;
17134+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17135+
17136+#ifdef CONFIG_PAX_SEGMEXEC
17137+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17138+ pax_task_size = SEGMEXEC_TASK_SIZE;
17139+#endif
17140+
17141+ pax_task_size -= PAGE_SIZE;
17142+
17143+ /* requested length too big for entire address space */
17144+ if (len > pax_task_size)
17145+ return -ENOMEM;
17146+
17147+ if (flags & MAP_FIXED)
17148+ return addr;
17149+
17150+#ifdef CONFIG_PAX_PAGEEXEC
17151+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17152+ goto bottomup;
17153+#endif
17154+
17155+#ifdef CONFIG_PAX_RANDMMAP
17156+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17157+#endif
17158+
17159+ /* requesting a specific address */
17160+ if (addr) {
17161+ addr = PAGE_ALIGN(addr);
17162+ if (pax_task_size - len >= addr) {
17163+ vma = find_vma(mm, addr);
17164+ if (check_heap_stack_gap(vma, addr, len))
17165+ return addr;
17166+ }
17167+ }
17168+
17169+ /* check if free_area_cache is useful for us */
17170+ if (len <= mm->cached_hole_size) {
17171+ mm->cached_hole_size = 0;
17172+ mm->free_area_cache = mm->mmap_base;
17173+ }
17174+
17175+ /* either no address requested or can't fit in requested address hole */
17176+ addr = mm->free_area_cache;
17177+
17178+ /* make sure it can fit in the remaining address space */
17179+ if (addr > len) {
17180+ vma = find_vma(mm, addr-len);
17181+ if (check_heap_stack_gap(vma, addr - len, len))
17182+ /* remember the address as a hint for next time */
17183+ return (mm->free_area_cache = addr-len);
17184+ }
17185+
17186+ if (mm->mmap_base < len)
17187+ goto bottomup;
17188+
17189+ addr = mm->mmap_base-len;
17190+
17191+ do {
17192+ /*
17193+ * Lookup failure means no vma is above this address,
17194+ * else if new region fits below vma->vm_start,
17195+ * return with success:
17196+ */
17197+ vma = find_vma(mm, addr);
17198+ if (check_heap_stack_gap(vma, addr, len))
17199+ /* remember the address as a hint for next time */
17200+ return (mm->free_area_cache = addr);
17201+
17202+ /* remember the largest hole we saw so far */
17203+ if (addr + mm->cached_hole_size < vma->vm_start)
17204+ mm->cached_hole_size = vma->vm_start - addr;
17205+
17206+ /* try just below the current vma->vm_start */
17207+ addr = skip_heap_stack_gap(vma, len);
17208+ } while (!IS_ERR_VALUE(addr));
17209+
17210+bottomup:
17211+ /*
17212+ * A failed mmap() very likely causes application failure,
17213+ * so fall back to the bottom-up function here. This scenario
17214+ * can happen with large stack limits and large mmap()
17215+ * allocations.
17216+ */
17217+
17218+#ifdef CONFIG_PAX_SEGMEXEC
17219+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17220+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17221+ else
17222+#endif
17223+
17224+ mm->mmap_base = TASK_UNMAPPED_BASE;
17225+
17226+#ifdef CONFIG_PAX_RANDMMAP
17227+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17228+ mm->mmap_base += mm->delta_mmap;
17229+#endif
17230+
17231+ mm->free_area_cache = mm->mmap_base;
17232+ mm->cached_hole_size = ~0UL;
17233+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17234+ /*
17235+ * Restore the topdown base:
17236+ */
17237+ mm->mmap_base = base;
17238+ mm->free_area_cache = base;
17239+ mm->cached_hole_size = ~0UL;
17240+
17241+ return addr;
17242 }
17243diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17244index 0514890..3dbebce 100644
17245--- a/arch/x86/kernel/sys_x86_64.c
17246+++ b/arch/x86/kernel/sys_x86_64.c
17247@@ -95,8 +95,8 @@ out:
17248 return error;
17249 }
17250
17251-static void find_start_end(unsigned long flags, unsigned long *begin,
17252- unsigned long *end)
17253+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17254+ unsigned long *begin, unsigned long *end)
17255 {
17256 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17257 unsigned long new_begin;
17258@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17259 *begin = new_begin;
17260 }
17261 } else {
17262- *begin = TASK_UNMAPPED_BASE;
17263+ *begin = mm->mmap_base;
17264 *end = TASK_SIZE;
17265 }
17266 }
17267@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17268 if (flags & MAP_FIXED)
17269 return addr;
17270
17271- find_start_end(flags, &begin, &end);
17272+ find_start_end(mm, flags, &begin, &end);
17273
17274 if (len > end)
17275 return -ENOMEM;
17276
17277+#ifdef CONFIG_PAX_RANDMMAP
17278+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17279+#endif
17280+
17281 if (addr) {
17282 addr = PAGE_ALIGN(addr);
17283 vma = find_vma(mm, addr);
17284- if (end - len >= addr &&
17285- (!vma || addr + len <= vma->vm_start))
17286+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17287 return addr;
17288 }
17289 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17290@@ -172,7 +175,7 @@ full_search:
17291 }
17292 return -ENOMEM;
17293 }
17294- if (!vma || addr + len <= vma->vm_start) {
17295+ if (check_heap_stack_gap(vma, addr, len)) {
17296 /*
17297 * Remember the place where we stopped the search:
17298 */
17299@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17300 {
17301 struct vm_area_struct *vma;
17302 struct mm_struct *mm = current->mm;
17303- unsigned long addr = addr0;
17304+ unsigned long base = mm->mmap_base, addr = addr0;
17305
17306 /* requested length too big for entire address space */
17307 if (len > TASK_SIZE)
17308@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17309 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17310 goto bottomup;
17311
17312+#ifdef CONFIG_PAX_RANDMMAP
17313+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17314+#endif
17315+
17316 /* requesting a specific address */
17317 if (addr) {
17318 addr = PAGE_ALIGN(addr);
17319- vma = find_vma(mm, addr);
17320- if (TASK_SIZE - len >= addr &&
17321- (!vma || addr + len <= vma->vm_start))
17322- return addr;
17323+ if (TASK_SIZE - len >= addr) {
17324+ vma = find_vma(mm, addr);
17325+ if (check_heap_stack_gap(vma, addr, len))
17326+ return addr;
17327+ }
17328 }
17329
17330 /* check if free_area_cache is useful for us */
17331@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17332 ALIGN_TOPDOWN);
17333
17334 vma = find_vma(mm, tmp_addr);
17335- if (!vma || tmp_addr + len <= vma->vm_start)
17336+ if (check_heap_stack_gap(vma, tmp_addr, len))
17337 /* remember the address as a hint for next time */
17338 return mm->free_area_cache = tmp_addr;
17339 }
17340@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17341 * return with success:
17342 */
17343 vma = find_vma(mm, addr);
17344- if (!vma || addr+len <= vma->vm_start)
17345+ if (check_heap_stack_gap(vma, addr, len))
17346 /* remember the address as a hint for next time */
17347 return mm->free_area_cache = addr;
17348
17349@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17350 mm->cached_hole_size = vma->vm_start - addr;
17351
17352 /* try just below the current vma->vm_start */
17353- addr = vma->vm_start-len;
17354- } while (len < vma->vm_start);
17355+ addr = skip_heap_stack_gap(vma, len);
17356+ } while (!IS_ERR_VALUE(addr));
17357
17358 bottomup:
17359 /*
17360@@ -270,13 +278,21 @@ bottomup:
17361 * can happen with large stack limits and large mmap()
17362 * allocations.
17363 */
17364+ mm->mmap_base = TASK_UNMAPPED_BASE;
17365+
17366+#ifdef CONFIG_PAX_RANDMMAP
17367+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17368+ mm->mmap_base += mm->delta_mmap;
17369+#endif
17370+
17371+ mm->free_area_cache = mm->mmap_base;
17372 mm->cached_hole_size = ~0UL;
17373- mm->free_area_cache = TASK_UNMAPPED_BASE;
17374 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17375 /*
17376 * Restore the topdown base:
17377 */
17378- mm->free_area_cache = mm->mmap_base;
17379+ mm->mmap_base = base;
17380+ mm->free_area_cache = base;
17381 mm->cached_hole_size = ~0UL;
17382
17383 return addr;
17384diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17385index 9a0e312..e6f66f2 100644
17386--- a/arch/x86/kernel/syscall_table_32.S
17387+++ b/arch/x86/kernel/syscall_table_32.S
17388@@ -1,3 +1,4 @@
17389+.section .rodata,"a",@progbits
17390 ENTRY(sys_call_table)
17391 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17392 .long sys_exit
17393diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17394index e2410e2..4fe3fbc 100644
17395--- a/arch/x86/kernel/tboot.c
17396+++ b/arch/x86/kernel/tboot.c
17397@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17398
17399 void tboot_shutdown(u32 shutdown_type)
17400 {
17401- void (*shutdown)(void);
17402+ void (* __noreturn shutdown)(void);
17403
17404 if (!tboot_enabled())
17405 return;
17406@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17407
17408 switch_to_tboot_pt();
17409
17410- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17411+ shutdown = (void *)tboot->shutdown_entry;
17412 shutdown();
17413
17414 /* should not reach here */
17415@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17416 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17417 }
17418
17419-static atomic_t ap_wfs_count;
17420+static atomic_unchecked_t ap_wfs_count;
17421
17422 static int tboot_wait_for_aps(int num_aps)
17423 {
17424@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17425 {
17426 switch (action) {
17427 case CPU_DYING:
17428- atomic_inc(&ap_wfs_count);
17429+ atomic_inc_unchecked(&ap_wfs_count);
17430 if (num_online_cpus() == 1)
17431- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17432+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17433 return NOTIFY_BAD;
17434 break;
17435 }
17436@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17437
17438 tboot_create_trampoline();
17439
17440- atomic_set(&ap_wfs_count, 0);
17441+ atomic_set_unchecked(&ap_wfs_count, 0);
17442 register_hotcpu_notifier(&tboot_cpu_notifier);
17443 return 0;
17444 }
17445diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17446index dd5fbf4..b7f2232 100644
17447--- a/arch/x86/kernel/time.c
17448+++ b/arch/x86/kernel/time.c
17449@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17450 {
17451 unsigned long pc = instruction_pointer(regs);
17452
17453- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17454+ if (!user_mode(regs) && in_lock_functions(pc)) {
17455 #ifdef CONFIG_FRAME_POINTER
17456- return *(unsigned long *)(regs->bp + sizeof(long));
17457+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17458 #else
17459 unsigned long *sp =
17460 (unsigned long *)kernel_stack_pointer(regs);
17461@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17462 * or above a saved flags. Eflags has bits 22-31 zero,
17463 * kernel addresses don't.
17464 */
17465+
17466+#ifdef CONFIG_PAX_KERNEXEC
17467+ return ktla_ktva(sp[0]);
17468+#else
17469 if (sp[0] >> 22)
17470 return sp[0];
17471 if (sp[1] >> 22)
17472 return sp[1];
17473 #endif
17474+
17475+#endif
17476 }
17477 return pc;
17478 }
17479diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17480index 6bb7b85..dd853e1 100644
17481--- a/arch/x86/kernel/tls.c
17482+++ b/arch/x86/kernel/tls.c
17483@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17484 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17485 return -EINVAL;
17486
17487+#ifdef CONFIG_PAX_SEGMEXEC
17488+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17489+ return -EINVAL;
17490+#endif
17491+
17492 set_tls_desc(p, idx, &info, 1);
17493
17494 return 0;
17495diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17496index 451c0a7..e57f551 100644
17497--- a/arch/x86/kernel/trampoline_32.S
17498+++ b/arch/x86/kernel/trampoline_32.S
17499@@ -32,6 +32,12 @@
17500 #include <asm/segment.h>
17501 #include <asm/page_types.h>
17502
17503+#ifdef CONFIG_PAX_KERNEXEC
17504+#define ta(X) (X)
17505+#else
17506+#define ta(X) ((X) - __PAGE_OFFSET)
17507+#endif
17508+
17509 #ifdef CONFIG_SMP
17510
17511 .section ".x86_trampoline","a"
17512@@ -62,7 +68,7 @@ r_base = .
17513 inc %ax # protected mode (PE) bit
17514 lmsw %ax # into protected mode
17515 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17516- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17517+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17518
17519 # These need to be in the same 64K segment as the above;
17520 # hence we don't use the boot_gdt_descr defined in head.S
17521diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17522index 09ff517..df19fbff 100644
17523--- a/arch/x86/kernel/trampoline_64.S
17524+++ b/arch/x86/kernel/trampoline_64.S
17525@@ -90,7 +90,7 @@ startup_32:
17526 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17527 movl %eax, %ds
17528
17529- movl $X86_CR4_PAE, %eax
17530+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17531 movl %eax, %cr4 # Enable PAE mode
17532
17533 # Setup trampoline 4 level pagetables
17534@@ -138,7 +138,7 @@ tidt:
17535 # so the kernel can live anywhere
17536 .balign 4
17537 tgdt:
17538- .short tgdt_end - tgdt # gdt limit
17539+ .short tgdt_end - tgdt - 1 # gdt limit
17540 .long tgdt - r_base
17541 .short 0
17542 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17543diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17544index a8e3eb8..c9dbd7d 100644
17545--- a/arch/x86/kernel/traps.c
17546+++ b/arch/x86/kernel/traps.c
17547@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17548
17549 /* Do we ignore FPU interrupts ? */
17550 char ignore_fpu_irq;
17551-
17552-/*
17553- * The IDT has to be page-aligned to simplify the Pentium
17554- * F0 0F bug workaround.
17555- */
17556-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17557 #endif
17558
17559 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17560@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17561 }
17562
17563 static void __kprobes
17564-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17565+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17566 long error_code, siginfo_t *info)
17567 {
17568 struct task_struct *tsk = current;
17569
17570 #ifdef CONFIG_X86_32
17571- if (regs->flags & X86_VM_MASK) {
17572+ if (v8086_mode(regs)) {
17573 /*
17574 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17575 * On nmi (interrupt 2), do_trap should not be called.
17576@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17577 }
17578 #endif
17579
17580- if (!user_mode(regs))
17581+ if (!user_mode_novm(regs))
17582 goto kernel_trap;
17583
17584 #ifdef CONFIG_X86_32
17585@@ -148,7 +142,7 @@ trap_signal:
17586 printk_ratelimit()) {
17587 printk(KERN_INFO
17588 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17589- tsk->comm, tsk->pid, str,
17590+ tsk->comm, task_pid_nr(tsk), str,
17591 regs->ip, regs->sp, error_code);
17592 print_vma_addr(" in ", regs->ip);
17593 printk("\n");
17594@@ -165,8 +159,20 @@ kernel_trap:
17595 if (!fixup_exception(regs)) {
17596 tsk->thread.error_code = error_code;
17597 tsk->thread.trap_no = trapnr;
17598+
17599+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17600+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17601+ str = "PAX: suspicious stack segment fault";
17602+#endif
17603+
17604 die(str, regs, error_code);
17605 }
17606+
17607+#ifdef CONFIG_PAX_REFCOUNT
17608+ if (trapnr == 4)
17609+ pax_report_refcount_overflow(regs);
17610+#endif
17611+
17612 return;
17613
17614 #ifdef CONFIG_X86_32
17615@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17616 conditional_sti(regs);
17617
17618 #ifdef CONFIG_X86_32
17619- if (regs->flags & X86_VM_MASK)
17620+ if (v8086_mode(regs))
17621 goto gp_in_vm86;
17622 #endif
17623
17624 tsk = current;
17625- if (!user_mode(regs))
17626+ if (!user_mode_novm(regs))
17627 goto gp_in_kernel;
17628
17629+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17630+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17631+ struct mm_struct *mm = tsk->mm;
17632+ unsigned long limit;
17633+
17634+ down_write(&mm->mmap_sem);
17635+ limit = mm->context.user_cs_limit;
17636+ if (limit < TASK_SIZE) {
17637+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17638+ up_write(&mm->mmap_sem);
17639+ return;
17640+ }
17641+ up_write(&mm->mmap_sem);
17642+ }
17643+#endif
17644+
17645 tsk->thread.error_code = error_code;
17646 tsk->thread.trap_no = 13;
17647
17648@@ -295,6 +317,13 @@ gp_in_kernel:
17649 if (notify_die(DIE_GPF, "general protection fault", regs,
17650 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17651 return;
17652+
17653+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17654+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17655+ die("PAX: suspicious general protection fault", regs, error_code);
17656+ else
17657+#endif
17658+
17659 die("general protection fault", regs, error_code);
17660 }
17661
17662@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17663 /* It's safe to allow irq's after DR6 has been saved */
17664 preempt_conditional_sti(regs);
17665
17666- if (regs->flags & X86_VM_MASK) {
17667+ if (v8086_mode(regs)) {
17668 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17669 error_code, 1);
17670 preempt_conditional_cli(regs);
17671@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17672 * We already checked v86 mode above, so we can check for kernel mode
17673 * by just checking the CPL of CS.
17674 */
17675- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17676+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17677 tsk->thread.debugreg6 &= ~DR_STEP;
17678 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17679 regs->flags &= ~X86_EFLAGS_TF;
17680@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17681 return;
17682 conditional_sti(regs);
17683
17684- if (!user_mode_vm(regs))
17685+ if (!user_mode(regs))
17686 {
17687 if (!fixup_exception(regs)) {
17688 task->thread.error_code = error_code;
17689@@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17690 void __math_state_restore(void)
17691 {
17692 struct thread_info *thread = current_thread_info();
17693- struct task_struct *tsk = thread->task;
17694+ struct task_struct *tsk = current;
17695
17696 /*
17697 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17698@@ -595,8 +624,7 @@ void __math_state_restore(void)
17699 */
17700 asmlinkage void math_state_restore(void)
17701 {
17702- struct thread_info *thread = current_thread_info();
17703- struct task_struct *tsk = thread->task;
17704+ struct task_struct *tsk = current;
17705
17706 if (!tsk_used_math(tsk)) {
17707 local_irq_enable();
17708diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17709index b9242ba..50c5edd 100644
17710--- a/arch/x86/kernel/verify_cpu.S
17711+++ b/arch/x86/kernel/verify_cpu.S
17712@@ -20,6 +20,7 @@
17713 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17714 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17715 * arch/x86/kernel/head_32.S: processor startup
17716+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17717 *
17718 * verify_cpu, returns the status of longmode and SSE in register %eax.
17719 * 0: Success 1: Failure
17720diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17721index 863f875..4307295 100644
17722--- a/arch/x86/kernel/vm86_32.c
17723+++ b/arch/x86/kernel/vm86_32.c
17724@@ -41,6 +41,7 @@
17725 #include <linux/ptrace.h>
17726 #include <linux/audit.h>
17727 #include <linux/stddef.h>
17728+#include <linux/grsecurity.h>
17729
17730 #include <asm/uaccess.h>
17731 #include <asm/io.h>
17732@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17733 do_exit(SIGSEGV);
17734 }
17735
17736- tss = &per_cpu(init_tss, get_cpu());
17737+ tss = init_tss + get_cpu();
17738 current->thread.sp0 = current->thread.saved_sp0;
17739 current->thread.sysenter_cs = __KERNEL_CS;
17740 load_sp0(tss, &current->thread);
17741@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17742 struct task_struct *tsk;
17743 int tmp, ret = -EPERM;
17744
17745+#ifdef CONFIG_GRKERNSEC_VM86
17746+ if (!capable(CAP_SYS_RAWIO)) {
17747+ gr_handle_vm86();
17748+ goto out;
17749+ }
17750+#endif
17751+
17752 tsk = current;
17753 if (tsk->thread.saved_sp0)
17754 goto out;
17755@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17756 int tmp, ret;
17757 struct vm86plus_struct __user *v86;
17758
17759+#ifdef CONFIG_GRKERNSEC_VM86
17760+ if (!capable(CAP_SYS_RAWIO)) {
17761+ gr_handle_vm86();
17762+ ret = -EPERM;
17763+ goto out;
17764+ }
17765+#endif
17766+
17767 tsk = current;
17768 switch (cmd) {
17769 case VM86_REQUEST_IRQ:
17770@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17771 tsk->thread.saved_fs = info->regs32->fs;
17772 tsk->thread.saved_gs = get_user_gs(info->regs32);
17773
17774- tss = &per_cpu(init_tss, get_cpu());
17775+ tss = init_tss + get_cpu();
17776 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17777 if (cpu_has_sep)
17778 tsk->thread.sysenter_cs = 0;
17779@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17780 goto cannot_handle;
17781 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17782 goto cannot_handle;
17783- intr_ptr = (unsigned long __user *) (i << 2);
17784+ intr_ptr = (__force unsigned long __user *) (i << 2);
17785 if (get_user(segoffs, intr_ptr))
17786 goto cannot_handle;
17787 if ((segoffs >> 16) == BIOSSEG)
17788diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17789index 0f703f1..9e15f64 100644
17790--- a/arch/x86/kernel/vmlinux.lds.S
17791+++ b/arch/x86/kernel/vmlinux.lds.S
17792@@ -26,6 +26,13 @@
17793 #include <asm/page_types.h>
17794 #include <asm/cache.h>
17795 #include <asm/boot.h>
17796+#include <asm/segment.h>
17797+
17798+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17799+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17800+#else
17801+#define __KERNEL_TEXT_OFFSET 0
17802+#endif
17803
17804 #undef i386 /* in case the preprocessor is a 32bit one */
17805
17806@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17807
17808 PHDRS {
17809 text PT_LOAD FLAGS(5); /* R_E */
17810+#ifdef CONFIG_X86_32
17811+ module PT_LOAD FLAGS(5); /* R_E */
17812+#endif
17813+#ifdef CONFIG_XEN
17814+ rodata PT_LOAD FLAGS(5); /* R_E */
17815+#else
17816+ rodata PT_LOAD FLAGS(4); /* R__ */
17817+#endif
17818 data PT_LOAD FLAGS(6); /* RW_ */
17819-#ifdef CONFIG_X86_64
17820+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17821 #ifdef CONFIG_SMP
17822 percpu PT_LOAD FLAGS(6); /* RW_ */
17823 #endif
17824+ text.init PT_LOAD FLAGS(5); /* R_E */
17825+ text.exit PT_LOAD FLAGS(5); /* R_E */
17826 init PT_LOAD FLAGS(7); /* RWE */
17827-#endif
17828 note PT_NOTE FLAGS(0); /* ___ */
17829 }
17830
17831 SECTIONS
17832 {
17833 #ifdef CONFIG_X86_32
17834- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17835- phys_startup_32 = startup_32 - LOAD_OFFSET;
17836+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17837 #else
17838- . = __START_KERNEL;
17839- phys_startup_64 = startup_64 - LOAD_OFFSET;
17840+ . = __START_KERNEL;
17841 #endif
17842
17843 /* Text and read-only data */
17844- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17845- _text = .;
17846+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17847 /* bootstrapping code */
17848+#ifdef CONFIG_X86_32
17849+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17850+#else
17851+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17852+#endif
17853+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17854+ _text = .;
17855 HEAD_TEXT
17856 #ifdef CONFIG_X86_32
17857 . = ALIGN(PAGE_SIZE);
17858@@ -108,13 +128,47 @@ SECTIONS
17859 IRQENTRY_TEXT
17860 *(.fixup)
17861 *(.gnu.warning)
17862- /* End of text section */
17863- _etext = .;
17864 } :text = 0x9090
17865
17866- NOTES :text :note
17867+ . += __KERNEL_TEXT_OFFSET;
17868
17869- EXCEPTION_TABLE(16) :text = 0x9090
17870+#ifdef CONFIG_X86_32
17871+ . = ALIGN(PAGE_SIZE);
17872+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17873+
17874+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17875+ MODULES_EXEC_VADDR = .;
17876+ BYTE(0)
17877+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17878+ . = ALIGN(HPAGE_SIZE);
17879+ MODULES_EXEC_END = . - 1;
17880+#endif
17881+
17882+ } :module
17883+#endif
17884+
17885+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17886+ /* End of text section */
17887+ _etext = . - __KERNEL_TEXT_OFFSET;
17888+ }
17889+
17890+#ifdef CONFIG_X86_32
17891+ . = ALIGN(PAGE_SIZE);
17892+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17893+ *(.idt)
17894+ . = ALIGN(PAGE_SIZE);
17895+ *(.empty_zero_page)
17896+ *(.initial_pg_fixmap)
17897+ *(.initial_pg_pmd)
17898+ *(.initial_page_table)
17899+ *(.swapper_pg_dir)
17900+ } :rodata
17901+#endif
17902+
17903+ . = ALIGN(PAGE_SIZE);
17904+ NOTES :rodata :note
17905+
17906+ EXCEPTION_TABLE(16) :rodata
17907
17908 #if defined(CONFIG_DEBUG_RODATA)
17909 /* .text should occupy whole number of pages */
17910@@ -126,16 +180,20 @@ SECTIONS
17911
17912 /* Data */
17913 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17914+
17915+#ifdef CONFIG_PAX_KERNEXEC
17916+ . = ALIGN(HPAGE_SIZE);
17917+#else
17918+ . = ALIGN(PAGE_SIZE);
17919+#endif
17920+
17921 /* Start of data section */
17922 _sdata = .;
17923
17924 /* init_task */
17925 INIT_TASK_DATA(THREAD_SIZE)
17926
17927-#ifdef CONFIG_X86_32
17928- /* 32 bit has nosave before _edata */
17929 NOSAVE_DATA
17930-#endif
17931
17932 PAGE_ALIGNED_DATA(PAGE_SIZE)
17933
17934@@ -176,12 +234,19 @@ SECTIONS
17935 #endif /* CONFIG_X86_64 */
17936
17937 /* Init code and data - will be freed after init */
17938- . = ALIGN(PAGE_SIZE);
17939 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17940+ BYTE(0)
17941+
17942+#ifdef CONFIG_PAX_KERNEXEC
17943+ . = ALIGN(HPAGE_SIZE);
17944+#else
17945+ . = ALIGN(PAGE_SIZE);
17946+#endif
17947+
17948 __init_begin = .; /* paired with __init_end */
17949- }
17950+ } :init.begin
17951
17952-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17953+#ifdef CONFIG_SMP
17954 /*
17955 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17956 * output PHDR, so the next output section - .init.text - should
17957@@ -190,12 +255,27 @@ SECTIONS
17958 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17959 #endif
17960
17961- INIT_TEXT_SECTION(PAGE_SIZE)
17962-#ifdef CONFIG_X86_64
17963- :init
17964-#endif
17965+ . = ALIGN(PAGE_SIZE);
17966+ init_begin = .;
17967+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17968+ VMLINUX_SYMBOL(_sinittext) = .;
17969+ INIT_TEXT
17970+ VMLINUX_SYMBOL(_einittext) = .;
17971+ . = ALIGN(PAGE_SIZE);
17972+ } :text.init
17973
17974- INIT_DATA_SECTION(16)
17975+ /*
17976+ * .exit.text is discard at runtime, not link time, to deal with
17977+ * references from .altinstructions and .eh_frame
17978+ */
17979+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17980+ EXIT_TEXT
17981+ . = ALIGN(16);
17982+ } :text.exit
17983+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17984+
17985+ . = ALIGN(PAGE_SIZE);
17986+ INIT_DATA_SECTION(16) :init
17987
17988 /*
17989 * Code and data for a variety of lowlevel trampolines, to be
17990@@ -269,19 +349,12 @@ SECTIONS
17991 }
17992
17993 . = ALIGN(8);
17994- /*
17995- * .exit.text is discard at runtime, not link time, to deal with
17996- * references from .altinstructions and .eh_frame
17997- */
17998- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17999- EXIT_TEXT
18000- }
18001
18002 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18003 EXIT_DATA
18004 }
18005
18006-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18007+#ifndef CONFIG_SMP
18008 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18009 #endif
18010
18011@@ -300,16 +373,10 @@ SECTIONS
18012 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18013 __smp_locks = .;
18014 *(.smp_locks)
18015- . = ALIGN(PAGE_SIZE);
18016 __smp_locks_end = .;
18017+ . = ALIGN(PAGE_SIZE);
18018 }
18019
18020-#ifdef CONFIG_X86_64
18021- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18022- NOSAVE_DATA
18023- }
18024-#endif
18025-
18026 /* BSS */
18027 . = ALIGN(PAGE_SIZE);
18028 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18029@@ -325,6 +392,7 @@ SECTIONS
18030 __brk_base = .;
18031 . += 64 * 1024; /* 64k alignment slop space */
18032 *(.brk_reservation) /* areas brk users have reserved */
18033+ . = ALIGN(HPAGE_SIZE);
18034 __brk_limit = .;
18035 }
18036
18037@@ -351,13 +419,12 @@ SECTIONS
18038 * for the boot processor.
18039 */
18040 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18041-INIT_PER_CPU(gdt_page);
18042 INIT_PER_CPU(irq_stack_union);
18043
18044 /*
18045 * Build-time check on the image size:
18046 */
18047-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18048+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18049 "kernel image bigger than KERNEL_IMAGE_SIZE");
18050
18051 #ifdef CONFIG_SMP
18052diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18053index e4d4a22..47ee71f 100644
18054--- a/arch/x86/kernel/vsyscall_64.c
18055+++ b/arch/x86/kernel/vsyscall_64.c
18056@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18057 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18058 };
18059
18060-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18061+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18062
18063 static int __init vsyscall_setup(char *str)
18064 {
18065 if (str) {
18066 if (!strcmp("emulate", str))
18067 vsyscall_mode = EMULATE;
18068- else if (!strcmp("native", str))
18069- vsyscall_mode = NATIVE;
18070 else if (!strcmp("none", str))
18071 vsyscall_mode = NONE;
18072 else
18073@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18074
18075 tsk = current;
18076 if (seccomp_mode(&tsk->seccomp))
18077- do_exit(SIGKILL);
18078+ do_group_exit(SIGKILL);
18079
18080 switch (vsyscall_nr) {
18081 case 0:
18082@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18083 return true;
18084
18085 sigsegv:
18086- force_sig(SIGSEGV, current);
18087- return true;
18088+ do_group_exit(SIGKILL);
18089 }
18090
18091 /*
18092@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18093 extern char __vvar_page;
18094 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18095
18096- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18097- vsyscall_mode == NATIVE
18098- ? PAGE_KERNEL_VSYSCALL
18099- : PAGE_KERNEL_VVAR);
18100+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18101 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18102 (unsigned long)VSYSCALL_START);
18103
18104diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18105index 9796c2f..f686fbf 100644
18106--- a/arch/x86/kernel/x8664_ksyms_64.c
18107+++ b/arch/x86/kernel/x8664_ksyms_64.c
18108@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18109 EXPORT_SYMBOL(copy_user_generic_string);
18110 EXPORT_SYMBOL(copy_user_generic_unrolled);
18111 EXPORT_SYMBOL(__copy_user_nocache);
18112-EXPORT_SYMBOL(_copy_from_user);
18113-EXPORT_SYMBOL(_copy_to_user);
18114
18115 EXPORT_SYMBOL(copy_page);
18116 EXPORT_SYMBOL(clear_page);
18117diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18118index a391134..d0b63b6e 100644
18119--- a/arch/x86/kernel/xsave.c
18120+++ b/arch/x86/kernel/xsave.c
18121@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18122 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18123 return -EINVAL;
18124
18125- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18126+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18127 fx_sw_user->extended_size -
18128 FP_XSTATE_MAGIC2_SIZE));
18129 if (err)
18130@@ -267,7 +267,7 @@ fx_only:
18131 * the other extended state.
18132 */
18133 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18134- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18135+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18136 }
18137
18138 /*
18139@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18140 if (use_xsave())
18141 err = restore_user_xstate(buf);
18142 else
18143- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18144+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18145 buf);
18146 if (unlikely(err)) {
18147 /*
18148diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18149index f1e3be1..588efc8 100644
18150--- a/arch/x86/kvm/emulate.c
18151+++ b/arch/x86/kvm/emulate.c
18152@@ -249,6 +249,7 @@ struct gprefix {
18153
18154 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18155 do { \
18156+ unsigned long _tmp; \
18157 __asm__ __volatile__ ( \
18158 _PRE_EFLAGS("0", "4", "2") \
18159 _op _suffix " %"_x"3,%1; " \
18160@@ -263,8 +264,6 @@ struct gprefix {
18161 /* Raw emulation: instruction has two explicit operands. */
18162 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18163 do { \
18164- unsigned long _tmp; \
18165- \
18166 switch ((ctxt)->dst.bytes) { \
18167 case 2: \
18168 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18169@@ -280,7 +279,6 @@ struct gprefix {
18170
18171 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18172 do { \
18173- unsigned long _tmp; \
18174 switch ((ctxt)->dst.bytes) { \
18175 case 1: \
18176 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18177diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18178index 54abb40..a192606 100644
18179--- a/arch/x86/kvm/lapic.c
18180+++ b/arch/x86/kvm/lapic.c
18181@@ -53,7 +53,7 @@
18182 #define APIC_BUS_CYCLE_NS 1
18183
18184 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18185-#define apic_debug(fmt, arg...)
18186+#define apic_debug(fmt, arg...) do {} while (0)
18187
18188 #define APIC_LVT_NUM 6
18189 /* 14 is the version for Xeon and Pentium 8.4.8*/
18190diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18191index f1b36cf..af8a124 100644
18192--- a/arch/x86/kvm/mmu.c
18193+++ b/arch/x86/kvm/mmu.c
18194@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18195
18196 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18197
18198- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18199+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18200
18201 /*
18202 * Assume that the pte write on a page table of the same type
18203@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18204 }
18205
18206 spin_lock(&vcpu->kvm->mmu_lock);
18207- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18208+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18209 gentry = 0;
18210 kvm_mmu_free_some_pages(vcpu);
18211 ++vcpu->kvm->stat.mmu_pte_write;
18212diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18213index 9299410..ade2f9b 100644
18214--- a/arch/x86/kvm/paging_tmpl.h
18215+++ b/arch/x86/kvm/paging_tmpl.h
18216@@ -197,7 +197,7 @@ retry_walk:
18217 if (unlikely(kvm_is_error_hva(host_addr)))
18218 goto error;
18219
18220- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18221+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18222 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18223 goto error;
18224
18225@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18226 if (need_flush)
18227 kvm_flush_remote_tlbs(vcpu->kvm);
18228
18229- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18230+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18231
18232 spin_unlock(&vcpu->kvm->mmu_lock);
18233
18234diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18235index e32243e..a6e6172 100644
18236--- a/arch/x86/kvm/svm.c
18237+++ b/arch/x86/kvm/svm.c
18238@@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18239 int cpu = raw_smp_processor_id();
18240
18241 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18242+
18243+ pax_open_kernel();
18244 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18245+ pax_close_kernel();
18246+
18247 load_TR_desc();
18248 }
18249
18250@@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18251 #endif
18252 #endif
18253
18254+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18255+ __set_fs(current_thread_info()->addr_limit);
18256+#endif
18257+
18258 reload_tss(vcpu);
18259
18260 local_irq_disable();
18261diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18262index 579a0b5..ed7bbf9 100644
18263--- a/arch/x86/kvm/vmx.c
18264+++ b/arch/x86/kvm/vmx.c
18265@@ -1305,7 +1305,11 @@ static void reload_tss(void)
18266 struct desc_struct *descs;
18267
18268 descs = (void *)gdt->address;
18269+
18270+ pax_open_kernel();
18271 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18272+ pax_close_kernel();
18273+
18274 load_TR_desc();
18275 }
18276
18277@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18278 if (!cpu_has_vmx_flexpriority())
18279 flexpriority_enabled = 0;
18280
18281- if (!cpu_has_vmx_tpr_shadow())
18282- kvm_x86_ops->update_cr8_intercept = NULL;
18283+ if (!cpu_has_vmx_tpr_shadow()) {
18284+ pax_open_kernel();
18285+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18286+ pax_close_kernel();
18287+ }
18288
18289 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18290 kvm_disable_largepages();
18291@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18292 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18293
18294 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18295- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18296+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18297
18298 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18299 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18300@@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18301 "jmp .Lkvm_vmx_return \n\t"
18302 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18303 ".Lkvm_vmx_return: "
18304+
18305+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18306+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18307+ ".Lkvm_vmx_return2: "
18308+#endif
18309+
18310 /* Save guest registers, load host registers, keep flags */
18311 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18312 "pop %0 \n\t"
18313@@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18314 #endif
18315 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18316 [wordsize]"i"(sizeof(ulong))
18317+
18318+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18319+ ,[cs]"i"(__KERNEL_CS)
18320+#endif
18321+
18322 : "cc", "memory"
18323 , R"ax", R"bx", R"di", R"si"
18324 #ifdef CONFIG_X86_64
18325@@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18326 }
18327 }
18328
18329- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18330+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18331+
18332+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18333+ loadsegment(fs, __KERNEL_PERCPU);
18334+#endif
18335+
18336+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18337+ __set_fs(current_thread_info()->addr_limit);
18338+#endif
18339+
18340 vmx->loaded_vmcs->launched = 1;
18341
18342 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18343diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18344index 4c938da..4ddef65 100644
18345--- a/arch/x86/kvm/x86.c
18346+++ b/arch/x86/kvm/x86.c
18347@@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18348 {
18349 struct kvm *kvm = vcpu->kvm;
18350 int lm = is_long_mode(vcpu);
18351- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18352- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18353+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18354+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18355 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18356 : kvm->arch.xen_hvm_config.blob_size_32;
18357 u32 page_num = data & ~PAGE_MASK;
18358@@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18359 if (n < msr_list.nmsrs)
18360 goto out;
18361 r = -EFAULT;
18362+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18363+ goto out;
18364 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18365 num_msrs_to_save * sizeof(u32)))
18366 goto out;
18367@@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18368 struct kvm_cpuid2 *cpuid,
18369 struct kvm_cpuid_entry2 __user *entries)
18370 {
18371- int r;
18372+ int r, i;
18373
18374 r = -E2BIG;
18375 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18376 goto out;
18377 r = -EFAULT;
18378- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18379- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18380+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18381 goto out;
18382+ for (i = 0; i < cpuid->nent; ++i) {
18383+ struct kvm_cpuid_entry2 cpuid_entry;
18384+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18385+ goto out;
18386+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18387+ }
18388 vcpu->arch.cpuid_nent = cpuid->nent;
18389 kvm_apic_set_version(vcpu);
18390 kvm_x86_ops->cpuid_update(vcpu);
18391@@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18392 struct kvm_cpuid2 *cpuid,
18393 struct kvm_cpuid_entry2 __user *entries)
18394 {
18395- int r;
18396+ int r, i;
18397
18398 r = -E2BIG;
18399 if (cpuid->nent < vcpu->arch.cpuid_nent)
18400 goto out;
18401 r = -EFAULT;
18402- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18403- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18404+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18405 goto out;
18406+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18407+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18408+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18409+ goto out;
18410+ }
18411 return 0;
18412
18413 out:
18414@@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18415 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18416 struct kvm_interrupt *irq)
18417 {
18418- if (irq->irq < 0 || irq->irq >= 256)
18419+ if (irq->irq >= 256)
18420 return -EINVAL;
18421 if (irqchip_in_kernel(vcpu->kvm))
18422 return -ENXIO;
18423@@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18424 kvm_mmu_set_mmio_spte_mask(mask);
18425 }
18426
18427-int kvm_arch_init(void *opaque)
18428+int kvm_arch_init(const void *opaque)
18429 {
18430 int r;
18431 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18432diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18433index cf4603b..7cdde38 100644
18434--- a/arch/x86/lguest/boot.c
18435+++ b/arch/x86/lguest/boot.c
18436@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18437 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18438 * Launcher to reboot us.
18439 */
18440-static void lguest_restart(char *reason)
18441+static __noreturn void lguest_restart(char *reason)
18442 {
18443 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18444+ BUG();
18445 }
18446
18447 /*G:050
18448diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18449index 042f682..c92afb6 100644
18450--- a/arch/x86/lib/atomic64_32.c
18451+++ b/arch/x86/lib/atomic64_32.c
18452@@ -8,18 +8,30 @@
18453
18454 long long atomic64_read_cx8(long long, const atomic64_t *v);
18455 EXPORT_SYMBOL(atomic64_read_cx8);
18456+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18457+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18458 long long atomic64_set_cx8(long long, const atomic64_t *v);
18459 EXPORT_SYMBOL(atomic64_set_cx8);
18460+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18461+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18462 long long atomic64_xchg_cx8(long long, unsigned high);
18463 EXPORT_SYMBOL(atomic64_xchg_cx8);
18464 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18465 EXPORT_SYMBOL(atomic64_add_return_cx8);
18466+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18467+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18468 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18469 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18470+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18471+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18472 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18473 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18474+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18475+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18476 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18477 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18478+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18479+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18480 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18481 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18482 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18483@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18484 #ifndef CONFIG_X86_CMPXCHG64
18485 long long atomic64_read_386(long long, const atomic64_t *v);
18486 EXPORT_SYMBOL(atomic64_read_386);
18487+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18488+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18489 long long atomic64_set_386(long long, const atomic64_t *v);
18490 EXPORT_SYMBOL(atomic64_set_386);
18491+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18492+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18493 long long atomic64_xchg_386(long long, unsigned high);
18494 EXPORT_SYMBOL(atomic64_xchg_386);
18495 long long atomic64_add_return_386(long long a, atomic64_t *v);
18496 EXPORT_SYMBOL(atomic64_add_return_386);
18497+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18498+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18499 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18500 EXPORT_SYMBOL(atomic64_sub_return_386);
18501+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18502+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18503 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18504 EXPORT_SYMBOL(atomic64_inc_return_386);
18505+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18506+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18507 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18508 EXPORT_SYMBOL(atomic64_dec_return_386);
18509+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18510+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18511 long long atomic64_add_386(long long a, atomic64_t *v);
18512 EXPORT_SYMBOL(atomic64_add_386);
18513+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18514+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18515 long long atomic64_sub_386(long long a, atomic64_t *v);
18516 EXPORT_SYMBOL(atomic64_sub_386);
18517+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18518+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18519 long long atomic64_inc_386(long long a, atomic64_t *v);
18520 EXPORT_SYMBOL(atomic64_inc_386);
18521+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18522+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18523 long long atomic64_dec_386(long long a, atomic64_t *v);
18524 EXPORT_SYMBOL(atomic64_dec_386);
18525+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18526+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18527 long long atomic64_dec_if_positive_386(atomic64_t *v);
18528 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18529 int atomic64_inc_not_zero_386(atomic64_t *v);
18530diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18531index e8e7e0d..56fd1b0 100644
18532--- a/arch/x86/lib/atomic64_386_32.S
18533+++ b/arch/x86/lib/atomic64_386_32.S
18534@@ -48,6 +48,10 @@ BEGIN(read)
18535 movl (v), %eax
18536 movl 4(v), %edx
18537 RET_ENDP
18538+BEGIN(read_unchecked)
18539+ movl (v), %eax
18540+ movl 4(v), %edx
18541+RET_ENDP
18542 #undef v
18543
18544 #define v %esi
18545@@ -55,6 +59,10 @@ BEGIN(set)
18546 movl %ebx, (v)
18547 movl %ecx, 4(v)
18548 RET_ENDP
18549+BEGIN(set_unchecked)
18550+ movl %ebx, (v)
18551+ movl %ecx, 4(v)
18552+RET_ENDP
18553 #undef v
18554
18555 #define v %esi
18556@@ -70,6 +78,20 @@ RET_ENDP
18557 BEGIN(add)
18558 addl %eax, (v)
18559 adcl %edx, 4(v)
18560+
18561+#ifdef CONFIG_PAX_REFCOUNT
18562+ jno 0f
18563+ subl %eax, (v)
18564+ sbbl %edx, 4(v)
18565+ int $4
18566+0:
18567+ _ASM_EXTABLE(0b, 0b)
18568+#endif
18569+
18570+RET_ENDP
18571+BEGIN(add_unchecked)
18572+ addl %eax, (v)
18573+ adcl %edx, 4(v)
18574 RET_ENDP
18575 #undef v
18576
18577@@ -77,6 +99,24 @@ RET_ENDP
18578 BEGIN(add_return)
18579 addl (v), %eax
18580 adcl 4(v), %edx
18581+
18582+#ifdef CONFIG_PAX_REFCOUNT
18583+ into
18584+1234:
18585+ _ASM_EXTABLE(1234b, 2f)
18586+#endif
18587+
18588+ movl %eax, (v)
18589+ movl %edx, 4(v)
18590+
18591+#ifdef CONFIG_PAX_REFCOUNT
18592+2:
18593+#endif
18594+
18595+RET_ENDP
18596+BEGIN(add_return_unchecked)
18597+ addl (v), %eax
18598+ adcl 4(v), %edx
18599 movl %eax, (v)
18600 movl %edx, 4(v)
18601 RET_ENDP
18602@@ -86,6 +126,20 @@ RET_ENDP
18603 BEGIN(sub)
18604 subl %eax, (v)
18605 sbbl %edx, 4(v)
18606+
18607+#ifdef CONFIG_PAX_REFCOUNT
18608+ jno 0f
18609+ addl %eax, (v)
18610+ adcl %edx, 4(v)
18611+ int $4
18612+0:
18613+ _ASM_EXTABLE(0b, 0b)
18614+#endif
18615+
18616+RET_ENDP
18617+BEGIN(sub_unchecked)
18618+ subl %eax, (v)
18619+ sbbl %edx, 4(v)
18620 RET_ENDP
18621 #undef v
18622
18623@@ -96,6 +150,27 @@ BEGIN(sub_return)
18624 sbbl $0, %edx
18625 addl (v), %eax
18626 adcl 4(v), %edx
18627+
18628+#ifdef CONFIG_PAX_REFCOUNT
18629+ into
18630+1234:
18631+ _ASM_EXTABLE(1234b, 2f)
18632+#endif
18633+
18634+ movl %eax, (v)
18635+ movl %edx, 4(v)
18636+
18637+#ifdef CONFIG_PAX_REFCOUNT
18638+2:
18639+#endif
18640+
18641+RET_ENDP
18642+BEGIN(sub_return_unchecked)
18643+ negl %edx
18644+ negl %eax
18645+ sbbl $0, %edx
18646+ addl (v), %eax
18647+ adcl 4(v), %edx
18648 movl %eax, (v)
18649 movl %edx, 4(v)
18650 RET_ENDP
18651@@ -105,6 +180,20 @@ RET_ENDP
18652 BEGIN(inc)
18653 addl $1, (v)
18654 adcl $0, 4(v)
18655+
18656+#ifdef CONFIG_PAX_REFCOUNT
18657+ jno 0f
18658+ subl $1, (v)
18659+ sbbl $0, 4(v)
18660+ int $4
18661+0:
18662+ _ASM_EXTABLE(0b, 0b)
18663+#endif
18664+
18665+RET_ENDP
18666+BEGIN(inc_unchecked)
18667+ addl $1, (v)
18668+ adcl $0, 4(v)
18669 RET_ENDP
18670 #undef v
18671
18672@@ -114,6 +203,26 @@ BEGIN(inc_return)
18673 movl 4(v), %edx
18674 addl $1, %eax
18675 adcl $0, %edx
18676+
18677+#ifdef CONFIG_PAX_REFCOUNT
18678+ into
18679+1234:
18680+ _ASM_EXTABLE(1234b, 2f)
18681+#endif
18682+
18683+ movl %eax, (v)
18684+ movl %edx, 4(v)
18685+
18686+#ifdef CONFIG_PAX_REFCOUNT
18687+2:
18688+#endif
18689+
18690+RET_ENDP
18691+BEGIN(inc_return_unchecked)
18692+ movl (v), %eax
18693+ movl 4(v), %edx
18694+ addl $1, %eax
18695+ adcl $0, %edx
18696 movl %eax, (v)
18697 movl %edx, 4(v)
18698 RET_ENDP
18699@@ -123,6 +232,20 @@ RET_ENDP
18700 BEGIN(dec)
18701 subl $1, (v)
18702 sbbl $0, 4(v)
18703+
18704+#ifdef CONFIG_PAX_REFCOUNT
18705+ jno 0f
18706+ addl $1, (v)
18707+ adcl $0, 4(v)
18708+ int $4
18709+0:
18710+ _ASM_EXTABLE(0b, 0b)
18711+#endif
18712+
18713+RET_ENDP
18714+BEGIN(dec_unchecked)
18715+ subl $1, (v)
18716+ sbbl $0, 4(v)
18717 RET_ENDP
18718 #undef v
18719
18720@@ -132,6 +255,26 @@ BEGIN(dec_return)
18721 movl 4(v), %edx
18722 subl $1, %eax
18723 sbbl $0, %edx
18724+
18725+#ifdef CONFIG_PAX_REFCOUNT
18726+ into
18727+1234:
18728+ _ASM_EXTABLE(1234b, 2f)
18729+#endif
18730+
18731+ movl %eax, (v)
18732+ movl %edx, 4(v)
18733+
18734+#ifdef CONFIG_PAX_REFCOUNT
18735+2:
18736+#endif
18737+
18738+RET_ENDP
18739+BEGIN(dec_return_unchecked)
18740+ movl (v), %eax
18741+ movl 4(v), %edx
18742+ subl $1, %eax
18743+ sbbl $0, %edx
18744 movl %eax, (v)
18745 movl %edx, 4(v)
18746 RET_ENDP
18747@@ -143,6 +286,13 @@ BEGIN(add_unless)
18748 adcl %edx, %edi
18749 addl (v), %eax
18750 adcl 4(v), %edx
18751+
18752+#ifdef CONFIG_PAX_REFCOUNT
18753+ into
18754+1234:
18755+ _ASM_EXTABLE(1234b, 2f)
18756+#endif
18757+
18758 cmpl %eax, %esi
18759 je 3f
18760 1:
18761@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18762 1:
18763 addl $1, %eax
18764 adcl $0, %edx
18765+
18766+#ifdef CONFIG_PAX_REFCOUNT
18767+ into
18768+1234:
18769+ _ASM_EXTABLE(1234b, 2f)
18770+#endif
18771+
18772 movl %eax, (v)
18773 movl %edx, 4(v)
18774 movl $1, %eax
18775@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18776 movl 4(v), %edx
18777 subl $1, %eax
18778 sbbl $0, %edx
18779+
18780+#ifdef CONFIG_PAX_REFCOUNT
18781+ into
18782+1234:
18783+ _ASM_EXTABLE(1234b, 1f)
18784+#endif
18785+
18786 js 1f
18787 movl %eax, (v)
18788 movl %edx, 4(v)
18789diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18790index 391a083..d658e9f 100644
18791--- a/arch/x86/lib/atomic64_cx8_32.S
18792+++ b/arch/x86/lib/atomic64_cx8_32.S
18793@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18794 CFI_STARTPROC
18795
18796 read64 %ecx
18797+ pax_force_retaddr
18798 ret
18799 CFI_ENDPROC
18800 ENDPROC(atomic64_read_cx8)
18801
18802+ENTRY(atomic64_read_unchecked_cx8)
18803+ CFI_STARTPROC
18804+
18805+ read64 %ecx
18806+ pax_force_retaddr
18807+ ret
18808+ CFI_ENDPROC
18809+ENDPROC(atomic64_read_unchecked_cx8)
18810+
18811 ENTRY(atomic64_set_cx8)
18812 CFI_STARTPROC
18813
18814@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18815 cmpxchg8b (%esi)
18816 jne 1b
18817
18818+ pax_force_retaddr
18819 ret
18820 CFI_ENDPROC
18821 ENDPROC(atomic64_set_cx8)
18822
18823+ENTRY(atomic64_set_unchecked_cx8)
18824+ CFI_STARTPROC
18825+
18826+1:
18827+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18828+ * are atomic on 586 and newer */
18829+ cmpxchg8b (%esi)
18830+ jne 1b
18831+
18832+ pax_force_retaddr
18833+ ret
18834+ CFI_ENDPROC
18835+ENDPROC(atomic64_set_unchecked_cx8)
18836+
18837 ENTRY(atomic64_xchg_cx8)
18838 CFI_STARTPROC
18839
18840@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18841 cmpxchg8b (%esi)
18842 jne 1b
18843
18844+ pax_force_retaddr
18845 ret
18846 CFI_ENDPROC
18847 ENDPROC(atomic64_xchg_cx8)
18848
18849-.macro addsub_return func ins insc
18850-ENTRY(atomic64_\func\()_return_cx8)
18851+.macro addsub_return func ins insc unchecked=""
18852+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18853 CFI_STARTPROC
18854 SAVE ebp
18855 SAVE ebx
18856@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18857 movl %edx, %ecx
18858 \ins\()l %esi, %ebx
18859 \insc\()l %edi, %ecx
18860+
18861+.ifb \unchecked
18862+#ifdef CONFIG_PAX_REFCOUNT
18863+ into
18864+2:
18865+ _ASM_EXTABLE(2b, 3f)
18866+#endif
18867+.endif
18868+
18869 LOCK_PREFIX
18870 cmpxchg8b (%ebp)
18871 jne 1b
18872-
18873-10:
18874 movl %ebx, %eax
18875 movl %ecx, %edx
18876+
18877+.ifb \unchecked
18878+#ifdef CONFIG_PAX_REFCOUNT
18879+3:
18880+#endif
18881+.endif
18882+
18883 RESTORE edi
18884 RESTORE esi
18885 RESTORE ebx
18886 RESTORE ebp
18887+ pax_force_retaddr
18888 ret
18889 CFI_ENDPROC
18890-ENDPROC(atomic64_\func\()_return_cx8)
18891+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18892 .endm
18893
18894 addsub_return add add adc
18895 addsub_return sub sub sbb
18896+addsub_return add add adc _unchecked
18897+addsub_return sub sub sbb _unchecked
18898
18899-.macro incdec_return func ins insc
18900-ENTRY(atomic64_\func\()_return_cx8)
18901+.macro incdec_return func ins insc unchecked
18902+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18903 CFI_STARTPROC
18904 SAVE ebx
18905
18906@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18907 movl %edx, %ecx
18908 \ins\()l $1, %ebx
18909 \insc\()l $0, %ecx
18910+
18911+.ifb \unchecked
18912+#ifdef CONFIG_PAX_REFCOUNT
18913+ into
18914+2:
18915+ _ASM_EXTABLE(2b, 3f)
18916+#endif
18917+.endif
18918+
18919 LOCK_PREFIX
18920 cmpxchg8b (%esi)
18921 jne 1b
18922
18923-10:
18924 movl %ebx, %eax
18925 movl %ecx, %edx
18926+
18927+.ifb \unchecked
18928+#ifdef CONFIG_PAX_REFCOUNT
18929+3:
18930+#endif
18931+.endif
18932+
18933 RESTORE ebx
18934+ pax_force_retaddr
18935 ret
18936 CFI_ENDPROC
18937-ENDPROC(atomic64_\func\()_return_cx8)
18938+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18939 .endm
18940
18941 incdec_return inc add adc
18942 incdec_return dec sub sbb
18943+incdec_return inc add adc _unchecked
18944+incdec_return dec sub sbb _unchecked
18945
18946 ENTRY(atomic64_dec_if_positive_cx8)
18947 CFI_STARTPROC
18948@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18949 movl %edx, %ecx
18950 subl $1, %ebx
18951 sbb $0, %ecx
18952+
18953+#ifdef CONFIG_PAX_REFCOUNT
18954+ into
18955+1234:
18956+ _ASM_EXTABLE(1234b, 2f)
18957+#endif
18958+
18959 js 2f
18960 LOCK_PREFIX
18961 cmpxchg8b (%esi)
18962@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18963 movl %ebx, %eax
18964 movl %ecx, %edx
18965 RESTORE ebx
18966+ pax_force_retaddr
18967 ret
18968 CFI_ENDPROC
18969 ENDPROC(atomic64_dec_if_positive_cx8)
18970@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18971 movl %edx, %ecx
18972 addl %esi, %ebx
18973 adcl %edi, %ecx
18974+
18975+#ifdef CONFIG_PAX_REFCOUNT
18976+ into
18977+1234:
18978+ _ASM_EXTABLE(1234b, 3f)
18979+#endif
18980+
18981 LOCK_PREFIX
18982 cmpxchg8b (%ebp)
18983 jne 1b
18984@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
18985 CFI_ADJUST_CFA_OFFSET -8
18986 RESTORE ebx
18987 RESTORE ebp
18988+ pax_force_retaddr
18989 ret
18990 4:
18991 cmpl %edx, 4(%esp)
18992@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
18993 movl %edx, %ecx
18994 addl $1, %ebx
18995 adcl $0, %ecx
18996+
18997+#ifdef CONFIG_PAX_REFCOUNT
18998+ into
18999+1234:
19000+ _ASM_EXTABLE(1234b, 3f)
19001+#endif
19002+
19003 LOCK_PREFIX
19004 cmpxchg8b (%esi)
19005 jne 1b
19006@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19007 movl $1, %eax
19008 3:
19009 RESTORE ebx
19010+ pax_force_retaddr
19011 ret
19012 4:
19013 testl %edx, %edx
19014diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19015index 78d16a5..fbcf666 100644
19016--- a/arch/x86/lib/checksum_32.S
19017+++ b/arch/x86/lib/checksum_32.S
19018@@ -28,7 +28,8 @@
19019 #include <linux/linkage.h>
19020 #include <asm/dwarf2.h>
19021 #include <asm/errno.h>
19022-
19023+#include <asm/segment.h>
19024+
19025 /*
19026 * computes a partial checksum, e.g. for TCP/UDP fragments
19027 */
19028@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19029
19030 #define ARGBASE 16
19031 #define FP 12
19032-
19033-ENTRY(csum_partial_copy_generic)
19034+
19035+ENTRY(csum_partial_copy_generic_to_user)
19036 CFI_STARTPROC
19037+
19038+#ifdef CONFIG_PAX_MEMORY_UDEREF
19039+ pushl_cfi %gs
19040+ popl_cfi %es
19041+ jmp csum_partial_copy_generic
19042+#endif
19043+
19044+ENTRY(csum_partial_copy_generic_from_user)
19045+
19046+#ifdef CONFIG_PAX_MEMORY_UDEREF
19047+ pushl_cfi %gs
19048+ popl_cfi %ds
19049+#endif
19050+
19051+ENTRY(csum_partial_copy_generic)
19052 subl $4,%esp
19053 CFI_ADJUST_CFA_OFFSET 4
19054 pushl_cfi %edi
19055@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19056 jmp 4f
19057 SRC(1: movw (%esi), %bx )
19058 addl $2, %esi
19059-DST( movw %bx, (%edi) )
19060+DST( movw %bx, %es:(%edi) )
19061 addl $2, %edi
19062 addw %bx, %ax
19063 adcl $0, %eax
19064@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19065 SRC(1: movl (%esi), %ebx )
19066 SRC( movl 4(%esi), %edx )
19067 adcl %ebx, %eax
19068-DST( movl %ebx, (%edi) )
19069+DST( movl %ebx, %es:(%edi) )
19070 adcl %edx, %eax
19071-DST( movl %edx, 4(%edi) )
19072+DST( movl %edx, %es:4(%edi) )
19073
19074 SRC( movl 8(%esi), %ebx )
19075 SRC( movl 12(%esi), %edx )
19076 adcl %ebx, %eax
19077-DST( movl %ebx, 8(%edi) )
19078+DST( movl %ebx, %es:8(%edi) )
19079 adcl %edx, %eax
19080-DST( movl %edx, 12(%edi) )
19081+DST( movl %edx, %es:12(%edi) )
19082
19083 SRC( movl 16(%esi), %ebx )
19084 SRC( movl 20(%esi), %edx )
19085 adcl %ebx, %eax
19086-DST( movl %ebx, 16(%edi) )
19087+DST( movl %ebx, %es:16(%edi) )
19088 adcl %edx, %eax
19089-DST( movl %edx, 20(%edi) )
19090+DST( movl %edx, %es:20(%edi) )
19091
19092 SRC( movl 24(%esi), %ebx )
19093 SRC( movl 28(%esi), %edx )
19094 adcl %ebx, %eax
19095-DST( movl %ebx, 24(%edi) )
19096+DST( movl %ebx, %es:24(%edi) )
19097 adcl %edx, %eax
19098-DST( movl %edx, 28(%edi) )
19099+DST( movl %edx, %es:28(%edi) )
19100
19101 lea 32(%esi), %esi
19102 lea 32(%edi), %edi
19103@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19104 shrl $2, %edx # This clears CF
19105 SRC(3: movl (%esi), %ebx )
19106 adcl %ebx, %eax
19107-DST( movl %ebx, (%edi) )
19108+DST( movl %ebx, %es:(%edi) )
19109 lea 4(%esi), %esi
19110 lea 4(%edi), %edi
19111 dec %edx
19112@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19113 jb 5f
19114 SRC( movw (%esi), %cx )
19115 leal 2(%esi), %esi
19116-DST( movw %cx, (%edi) )
19117+DST( movw %cx, %es:(%edi) )
19118 leal 2(%edi), %edi
19119 je 6f
19120 shll $16,%ecx
19121 SRC(5: movb (%esi), %cl )
19122-DST( movb %cl, (%edi) )
19123+DST( movb %cl, %es:(%edi) )
19124 6: addl %ecx, %eax
19125 adcl $0, %eax
19126 7:
19127@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19128
19129 6001:
19130 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19131- movl $-EFAULT, (%ebx)
19132+ movl $-EFAULT, %ss:(%ebx)
19133
19134 # zero the complete destination - computing the rest
19135 # is too much work
19136@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19137
19138 6002:
19139 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19140- movl $-EFAULT,(%ebx)
19141+ movl $-EFAULT,%ss:(%ebx)
19142 jmp 5000b
19143
19144 .previous
19145
19146+ pushl_cfi %ss
19147+ popl_cfi %ds
19148+ pushl_cfi %ss
19149+ popl_cfi %es
19150 popl_cfi %ebx
19151 CFI_RESTORE ebx
19152 popl_cfi %esi
19153@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19154 popl_cfi %ecx # equivalent to addl $4,%esp
19155 ret
19156 CFI_ENDPROC
19157-ENDPROC(csum_partial_copy_generic)
19158+ENDPROC(csum_partial_copy_generic_to_user)
19159
19160 #else
19161
19162 /* Version for PentiumII/PPro */
19163
19164 #define ROUND1(x) \
19165+ nop; nop; nop; \
19166 SRC(movl x(%esi), %ebx ) ; \
19167 addl %ebx, %eax ; \
19168- DST(movl %ebx, x(%edi) ) ;
19169+ DST(movl %ebx, %es:x(%edi)) ;
19170
19171 #define ROUND(x) \
19172+ nop; nop; nop; \
19173 SRC(movl x(%esi), %ebx ) ; \
19174 adcl %ebx, %eax ; \
19175- DST(movl %ebx, x(%edi) ) ;
19176+ DST(movl %ebx, %es:x(%edi)) ;
19177
19178 #define ARGBASE 12
19179-
19180-ENTRY(csum_partial_copy_generic)
19181+
19182+ENTRY(csum_partial_copy_generic_to_user)
19183 CFI_STARTPROC
19184+
19185+#ifdef CONFIG_PAX_MEMORY_UDEREF
19186+ pushl_cfi %gs
19187+ popl_cfi %es
19188+ jmp csum_partial_copy_generic
19189+#endif
19190+
19191+ENTRY(csum_partial_copy_generic_from_user)
19192+
19193+#ifdef CONFIG_PAX_MEMORY_UDEREF
19194+ pushl_cfi %gs
19195+ popl_cfi %ds
19196+#endif
19197+
19198+ENTRY(csum_partial_copy_generic)
19199 pushl_cfi %ebx
19200 CFI_REL_OFFSET ebx, 0
19201 pushl_cfi %edi
19202@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19203 subl %ebx, %edi
19204 lea -1(%esi),%edx
19205 andl $-32,%edx
19206- lea 3f(%ebx,%ebx), %ebx
19207+ lea 3f(%ebx,%ebx,2), %ebx
19208 testl %esi, %esi
19209 jmp *%ebx
19210 1: addl $64,%esi
19211@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19212 jb 5f
19213 SRC( movw (%esi), %dx )
19214 leal 2(%esi), %esi
19215-DST( movw %dx, (%edi) )
19216+DST( movw %dx, %es:(%edi) )
19217 leal 2(%edi), %edi
19218 je 6f
19219 shll $16,%edx
19220 5:
19221 SRC( movb (%esi), %dl )
19222-DST( movb %dl, (%edi) )
19223+DST( movb %dl, %es:(%edi) )
19224 6: addl %edx, %eax
19225 adcl $0, %eax
19226 7:
19227 .section .fixup, "ax"
19228 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19229- movl $-EFAULT, (%ebx)
19230+ movl $-EFAULT, %ss:(%ebx)
19231 # zero the complete destination (computing the rest is too much work)
19232 movl ARGBASE+8(%esp),%edi # dst
19233 movl ARGBASE+12(%esp),%ecx # len
19234@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19235 rep; stosb
19236 jmp 7b
19237 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19238- movl $-EFAULT, (%ebx)
19239+ movl $-EFAULT, %ss:(%ebx)
19240 jmp 7b
19241 .previous
19242
19243+#ifdef CONFIG_PAX_MEMORY_UDEREF
19244+ pushl_cfi %ss
19245+ popl_cfi %ds
19246+ pushl_cfi %ss
19247+ popl_cfi %es
19248+#endif
19249+
19250 popl_cfi %esi
19251 CFI_RESTORE esi
19252 popl_cfi %edi
19253@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19254 CFI_RESTORE ebx
19255 ret
19256 CFI_ENDPROC
19257-ENDPROC(csum_partial_copy_generic)
19258+ENDPROC(csum_partial_copy_generic_to_user)
19259
19260 #undef ROUND
19261 #undef ROUND1
19262diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19263index f2145cf..cea889d 100644
19264--- a/arch/x86/lib/clear_page_64.S
19265+++ b/arch/x86/lib/clear_page_64.S
19266@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19267 movl $4096/8,%ecx
19268 xorl %eax,%eax
19269 rep stosq
19270+ pax_force_retaddr
19271 ret
19272 CFI_ENDPROC
19273 ENDPROC(clear_page_c)
19274@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19275 movl $4096,%ecx
19276 xorl %eax,%eax
19277 rep stosb
19278+ pax_force_retaddr
19279 ret
19280 CFI_ENDPROC
19281 ENDPROC(clear_page_c_e)
19282@@ -43,6 +45,7 @@ ENTRY(clear_page)
19283 leaq 64(%rdi),%rdi
19284 jnz .Lloop
19285 nop
19286+ pax_force_retaddr
19287 ret
19288 CFI_ENDPROC
19289 .Lclear_page_end:
19290@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19291
19292 #include <asm/cpufeature.h>
19293
19294- .section .altinstr_replacement,"ax"
19295+ .section .altinstr_replacement,"a"
19296 1: .byte 0xeb /* jmp <disp8> */
19297 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19298 2: .byte 0xeb /* jmp <disp8> */
19299diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19300index 1e572c5..2a162cd 100644
19301--- a/arch/x86/lib/cmpxchg16b_emu.S
19302+++ b/arch/x86/lib/cmpxchg16b_emu.S
19303@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19304
19305 popf
19306 mov $1, %al
19307+ pax_force_retaddr
19308 ret
19309
19310 not_same:
19311 popf
19312 xor %al,%al
19313+ pax_force_retaddr
19314 ret
19315
19316 CFI_ENDPROC
19317diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19318index 01c805b..dccb07f 100644
19319--- a/arch/x86/lib/copy_page_64.S
19320+++ b/arch/x86/lib/copy_page_64.S
19321@@ -9,6 +9,7 @@ copy_page_c:
19322 CFI_STARTPROC
19323 movl $4096/8,%ecx
19324 rep movsq
19325+ pax_force_retaddr
19326 ret
19327 CFI_ENDPROC
19328 ENDPROC(copy_page_c)
19329@@ -39,7 +40,7 @@ ENTRY(copy_page)
19330 movq 16 (%rsi), %rdx
19331 movq 24 (%rsi), %r8
19332 movq 32 (%rsi), %r9
19333- movq 40 (%rsi), %r10
19334+ movq 40 (%rsi), %r13
19335 movq 48 (%rsi), %r11
19336 movq 56 (%rsi), %r12
19337
19338@@ -50,7 +51,7 @@ ENTRY(copy_page)
19339 movq %rdx, 16 (%rdi)
19340 movq %r8, 24 (%rdi)
19341 movq %r9, 32 (%rdi)
19342- movq %r10, 40 (%rdi)
19343+ movq %r13, 40 (%rdi)
19344 movq %r11, 48 (%rdi)
19345 movq %r12, 56 (%rdi)
19346
19347@@ -69,7 +70,7 @@ ENTRY(copy_page)
19348 movq 16 (%rsi), %rdx
19349 movq 24 (%rsi), %r8
19350 movq 32 (%rsi), %r9
19351- movq 40 (%rsi), %r10
19352+ movq 40 (%rsi), %r13
19353 movq 48 (%rsi), %r11
19354 movq 56 (%rsi), %r12
19355
19356@@ -78,7 +79,7 @@ ENTRY(copy_page)
19357 movq %rdx, 16 (%rdi)
19358 movq %r8, 24 (%rdi)
19359 movq %r9, 32 (%rdi)
19360- movq %r10, 40 (%rdi)
19361+ movq %r13, 40 (%rdi)
19362 movq %r11, 48 (%rdi)
19363 movq %r12, 56 (%rdi)
19364
19365@@ -95,6 +96,7 @@ ENTRY(copy_page)
19366 CFI_RESTORE r13
19367 addq $3*8,%rsp
19368 CFI_ADJUST_CFA_OFFSET -3*8
19369+ pax_force_retaddr
19370 ret
19371 .Lcopy_page_end:
19372 CFI_ENDPROC
19373@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19374
19375 #include <asm/cpufeature.h>
19376
19377- .section .altinstr_replacement,"ax"
19378+ .section .altinstr_replacement,"a"
19379 1: .byte 0xeb /* jmp <disp8> */
19380 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19381 2:
19382diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19383index 0248402..821c786 100644
19384--- a/arch/x86/lib/copy_user_64.S
19385+++ b/arch/x86/lib/copy_user_64.S
19386@@ -16,6 +16,7 @@
19387 #include <asm/thread_info.h>
19388 #include <asm/cpufeature.h>
19389 #include <asm/alternative-asm.h>
19390+#include <asm/pgtable.h>
19391
19392 /*
19393 * By placing feature2 after feature1 in altinstructions section, we logically
19394@@ -29,7 +30,7 @@
19395 .byte 0xe9 /* 32bit jump */
19396 .long \orig-1f /* by default jump to orig */
19397 1:
19398- .section .altinstr_replacement,"ax"
19399+ .section .altinstr_replacement,"a"
19400 2: .byte 0xe9 /* near jump with 32bit immediate */
19401 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19402 3: .byte 0xe9 /* near jump with 32bit immediate */
19403@@ -71,47 +72,20 @@
19404 #endif
19405 .endm
19406
19407-/* Standard copy_to_user with segment limit checking */
19408-ENTRY(_copy_to_user)
19409- CFI_STARTPROC
19410- GET_THREAD_INFO(%rax)
19411- movq %rdi,%rcx
19412- addq %rdx,%rcx
19413- jc bad_to_user
19414- cmpq TI_addr_limit(%rax),%rcx
19415- ja bad_to_user
19416- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19417- copy_user_generic_unrolled,copy_user_generic_string, \
19418- copy_user_enhanced_fast_string
19419- CFI_ENDPROC
19420-ENDPROC(_copy_to_user)
19421-
19422-/* Standard copy_from_user with segment limit checking */
19423-ENTRY(_copy_from_user)
19424- CFI_STARTPROC
19425- GET_THREAD_INFO(%rax)
19426- movq %rsi,%rcx
19427- addq %rdx,%rcx
19428- jc bad_from_user
19429- cmpq TI_addr_limit(%rax),%rcx
19430- ja bad_from_user
19431- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19432- copy_user_generic_unrolled,copy_user_generic_string, \
19433- copy_user_enhanced_fast_string
19434- CFI_ENDPROC
19435-ENDPROC(_copy_from_user)
19436-
19437 .section .fixup,"ax"
19438 /* must zero dest */
19439 ENTRY(bad_from_user)
19440 bad_from_user:
19441 CFI_STARTPROC
19442+ testl %edx,%edx
19443+ js bad_to_user
19444 movl %edx,%ecx
19445 xorl %eax,%eax
19446 rep
19447 stosb
19448 bad_to_user:
19449 movl %edx,%eax
19450+ pax_force_retaddr
19451 ret
19452 CFI_ENDPROC
19453 ENDPROC(bad_from_user)
19454@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19455 jz 17f
19456 1: movq (%rsi),%r8
19457 2: movq 1*8(%rsi),%r9
19458-3: movq 2*8(%rsi),%r10
19459+3: movq 2*8(%rsi),%rax
19460 4: movq 3*8(%rsi),%r11
19461 5: movq %r8,(%rdi)
19462 6: movq %r9,1*8(%rdi)
19463-7: movq %r10,2*8(%rdi)
19464+7: movq %rax,2*8(%rdi)
19465 8: movq %r11,3*8(%rdi)
19466 9: movq 4*8(%rsi),%r8
19467 10: movq 5*8(%rsi),%r9
19468-11: movq 6*8(%rsi),%r10
19469+11: movq 6*8(%rsi),%rax
19470 12: movq 7*8(%rsi),%r11
19471 13: movq %r8,4*8(%rdi)
19472 14: movq %r9,5*8(%rdi)
19473-15: movq %r10,6*8(%rdi)
19474+15: movq %rax,6*8(%rdi)
19475 16: movq %r11,7*8(%rdi)
19476 leaq 64(%rsi),%rsi
19477 leaq 64(%rdi),%rdi
19478@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19479 decl %ecx
19480 jnz 21b
19481 23: xor %eax,%eax
19482+ pax_force_retaddr
19483 ret
19484
19485 .section .fixup,"ax"
19486@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19487 3: rep
19488 movsb
19489 4: xorl %eax,%eax
19490+ pax_force_retaddr
19491 ret
19492
19493 .section .fixup,"ax"
19494@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19495 1: rep
19496 movsb
19497 2: xorl %eax,%eax
19498+ pax_force_retaddr
19499 ret
19500
19501 .section .fixup,"ax"
19502diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19503index cb0c112..e3a6895 100644
19504--- a/arch/x86/lib/copy_user_nocache_64.S
19505+++ b/arch/x86/lib/copy_user_nocache_64.S
19506@@ -8,12 +8,14 @@
19507
19508 #include <linux/linkage.h>
19509 #include <asm/dwarf2.h>
19510+#include <asm/alternative-asm.h>
19511
19512 #define FIX_ALIGNMENT 1
19513
19514 #include <asm/current.h>
19515 #include <asm/asm-offsets.h>
19516 #include <asm/thread_info.h>
19517+#include <asm/pgtable.h>
19518
19519 .macro ALIGN_DESTINATION
19520 #ifdef FIX_ALIGNMENT
19521@@ -50,6 +52,15 @@
19522 */
19523 ENTRY(__copy_user_nocache)
19524 CFI_STARTPROC
19525+
19526+#ifdef CONFIG_PAX_MEMORY_UDEREF
19527+ mov $PAX_USER_SHADOW_BASE,%rcx
19528+ cmp %rcx,%rsi
19529+ jae 1f
19530+ add %rcx,%rsi
19531+1:
19532+#endif
19533+
19534 cmpl $8,%edx
19535 jb 20f /* less then 8 bytes, go to byte copy loop */
19536 ALIGN_DESTINATION
19537@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19538 jz 17f
19539 1: movq (%rsi),%r8
19540 2: movq 1*8(%rsi),%r9
19541-3: movq 2*8(%rsi),%r10
19542+3: movq 2*8(%rsi),%rax
19543 4: movq 3*8(%rsi),%r11
19544 5: movnti %r8,(%rdi)
19545 6: movnti %r9,1*8(%rdi)
19546-7: movnti %r10,2*8(%rdi)
19547+7: movnti %rax,2*8(%rdi)
19548 8: movnti %r11,3*8(%rdi)
19549 9: movq 4*8(%rsi),%r8
19550 10: movq 5*8(%rsi),%r9
19551-11: movq 6*8(%rsi),%r10
19552+11: movq 6*8(%rsi),%rax
19553 12: movq 7*8(%rsi),%r11
19554 13: movnti %r8,4*8(%rdi)
19555 14: movnti %r9,5*8(%rdi)
19556-15: movnti %r10,6*8(%rdi)
19557+15: movnti %rax,6*8(%rdi)
19558 16: movnti %r11,7*8(%rdi)
19559 leaq 64(%rsi),%rsi
19560 leaq 64(%rdi),%rdi
19561@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19562 jnz 21b
19563 23: xorl %eax,%eax
19564 sfence
19565+ pax_force_retaddr
19566 ret
19567
19568 .section .fixup,"ax"
19569diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19570index fb903b7..c92b7f7 100644
19571--- a/arch/x86/lib/csum-copy_64.S
19572+++ b/arch/x86/lib/csum-copy_64.S
19573@@ -8,6 +8,7 @@
19574 #include <linux/linkage.h>
19575 #include <asm/dwarf2.h>
19576 #include <asm/errno.h>
19577+#include <asm/alternative-asm.h>
19578
19579 /*
19580 * Checksum copy with exception handling.
19581@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19582 CFI_RESTORE rbp
19583 addq $7*8, %rsp
19584 CFI_ADJUST_CFA_OFFSET -7*8
19585+ pax_force_retaddr 0, 1
19586 ret
19587 CFI_RESTORE_STATE
19588
19589diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19590index 459b58a..9570bc7 100644
19591--- a/arch/x86/lib/csum-wrappers_64.c
19592+++ b/arch/x86/lib/csum-wrappers_64.c
19593@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19594 len -= 2;
19595 }
19596 }
19597- isum = csum_partial_copy_generic((__force const void *)src,
19598+
19599+#ifdef CONFIG_PAX_MEMORY_UDEREF
19600+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19601+ src += PAX_USER_SHADOW_BASE;
19602+#endif
19603+
19604+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
19605 dst, len, isum, errp, NULL);
19606 if (unlikely(*errp))
19607 goto out_err;
19608@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19609 }
19610
19611 *errp = 0;
19612- return csum_partial_copy_generic(src, (void __force *)dst,
19613+
19614+#ifdef CONFIG_PAX_MEMORY_UDEREF
19615+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19616+ dst += PAX_USER_SHADOW_BASE;
19617+#endif
19618+
19619+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19620 len, isum, NULL, errp);
19621 }
19622 EXPORT_SYMBOL(csum_partial_copy_to_user);
19623diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19624index 51f1504..ddac4c1 100644
19625--- a/arch/x86/lib/getuser.S
19626+++ b/arch/x86/lib/getuser.S
19627@@ -33,15 +33,38 @@
19628 #include <asm/asm-offsets.h>
19629 #include <asm/thread_info.h>
19630 #include <asm/asm.h>
19631+#include <asm/segment.h>
19632+#include <asm/pgtable.h>
19633+#include <asm/alternative-asm.h>
19634+
19635+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19636+#define __copyuser_seg gs;
19637+#else
19638+#define __copyuser_seg
19639+#endif
19640
19641 .text
19642 ENTRY(__get_user_1)
19643 CFI_STARTPROC
19644+
19645+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19646 GET_THREAD_INFO(%_ASM_DX)
19647 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19648 jae bad_get_user
19649-1: movzb (%_ASM_AX),%edx
19650+
19651+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19652+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19653+ cmp %_ASM_DX,%_ASM_AX
19654+ jae 1234f
19655+ add %_ASM_DX,%_ASM_AX
19656+1234:
19657+#endif
19658+
19659+#endif
19660+
19661+1: __copyuser_seg movzb (%_ASM_AX),%edx
19662 xor %eax,%eax
19663+ pax_force_retaddr
19664 ret
19665 CFI_ENDPROC
19666 ENDPROC(__get_user_1)
19667@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19668 ENTRY(__get_user_2)
19669 CFI_STARTPROC
19670 add $1,%_ASM_AX
19671+
19672+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19673 jc bad_get_user
19674 GET_THREAD_INFO(%_ASM_DX)
19675 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19676 jae bad_get_user
19677-2: movzwl -1(%_ASM_AX),%edx
19678+
19679+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19680+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19681+ cmp %_ASM_DX,%_ASM_AX
19682+ jae 1234f
19683+ add %_ASM_DX,%_ASM_AX
19684+1234:
19685+#endif
19686+
19687+#endif
19688+
19689+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19690 xor %eax,%eax
19691+ pax_force_retaddr
19692 ret
19693 CFI_ENDPROC
19694 ENDPROC(__get_user_2)
19695@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19696 ENTRY(__get_user_4)
19697 CFI_STARTPROC
19698 add $3,%_ASM_AX
19699+
19700+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19701 jc bad_get_user
19702 GET_THREAD_INFO(%_ASM_DX)
19703 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19704 jae bad_get_user
19705-3: mov -3(%_ASM_AX),%edx
19706+
19707+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19708+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19709+ cmp %_ASM_DX,%_ASM_AX
19710+ jae 1234f
19711+ add %_ASM_DX,%_ASM_AX
19712+1234:
19713+#endif
19714+
19715+#endif
19716+
19717+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19718 xor %eax,%eax
19719+ pax_force_retaddr
19720 ret
19721 CFI_ENDPROC
19722 ENDPROC(__get_user_4)
19723@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19724 GET_THREAD_INFO(%_ASM_DX)
19725 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19726 jae bad_get_user
19727+
19728+#ifdef CONFIG_PAX_MEMORY_UDEREF
19729+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19730+ cmp %_ASM_DX,%_ASM_AX
19731+ jae 1234f
19732+ add %_ASM_DX,%_ASM_AX
19733+1234:
19734+#endif
19735+
19736 4: movq -7(%_ASM_AX),%_ASM_DX
19737 xor %eax,%eax
19738+ pax_force_retaddr
19739 ret
19740 CFI_ENDPROC
19741 ENDPROC(__get_user_8)
19742@@ -91,6 +152,7 @@ bad_get_user:
19743 CFI_STARTPROC
19744 xor %edx,%edx
19745 mov $(-EFAULT),%_ASM_AX
19746+ pax_force_retaddr
19747 ret
19748 CFI_ENDPROC
19749 END(bad_get_user)
19750diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19751index 374562e..a75830b 100644
19752--- a/arch/x86/lib/insn.c
19753+++ b/arch/x86/lib/insn.c
19754@@ -21,6 +21,11 @@
19755 #include <linux/string.h>
19756 #include <asm/inat.h>
19757 #include <asm/insn.h>
19758+#ifdef __KERNEL__
19759+#include <asm/pgtable_types.h>
19760+#else
19761+#define ktla_ktva(addr) addr
19762+#endif
19763
19764 /* Verify next sizeof(t) bytes can be on the same instruction */
19765 #define validate_next(t, insn, n) \
19766@@ -49,8 +54,8 @@
19767 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19768 {
19769 memset(insn, 0, sizeof(*insn));
19770- insn->kaddr = kaddr;
19771- insn->next_byte = kaddr;
19772+ insn->kaddr = ktla_ktva(kaddr);
19773+ insn->next_byte = ktla_ktva(kaddr);
19774 insn->x86_64 = x86_64 ? 1 : 0;
19775 insn->opnd_bytes = 4;
19776 if (x86_64)
19777diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19778index 05a95e7..326f2fa 100644
19779--- a/arch/x86/lib/iomap_copy_64.S
19780+++ b/arch/x86/lib/iomap_copy_64.S
19781@@ -17,6 +17,7 @@
19782
19783 #include <linux/linkage.h>
19784 #include <asm/dwarf2.h>
19785+#include <asm/alternative-asm.h>
19786
19787 /*
19788 * override generic version in lib/iomap_copy.c
19789@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19790 CFI_STARTPROC
19791 movl %edx,%ecx
19792 rep movsd
19793+ pax_force_retaddr
19794 ret
19795 CFI_ENDPROC
19796 ENDPROC(__iowrite32_copy)
19797diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19798index efbf2a0..8893637 100644
19799--- a/arch/x86/lib/memcpy_64.S
19800+++ b/arch/x86/lib/memcpy_64.S
19801@@ -34,6 +34,7 @@
19802 rep movsq
19803 movl %edx, %ecx
19804 rep movsb
19805+ pax_force_retaddr
19806 ret
19807 .Lmemcpy_e:
19808 .previous
19809@@ -51,6 +52,7 @@
19810
19811 movl %edx, %ecx
19812 rep movsb
19813+ pax_force_retaddr
19814 ret
19815 .Lmemcpy_e_e:
19816 .previous
19817@@ -81,13 +83,13 @@ ENTRY(memcpy)
19818 */
19819 movq 0*8(%rsi), %r8
19820 movq 1*8(%rsi), %r9
19821- movq 2*8(%rsi), %r10
19822+ movq 2*8(%rsi), %rcx
19823 movq 3*8(%rsi), %r11
19824 leaq 4*8(%rsi), %rsi
19825
19826 movq %r8, 0*8(%rdi)
19827 movq %r9, 1*8(%rdi)
19828- movq %r10, 2*8(%rdi)
19829+ movq %rcx, 2*8(%rdi)
19830 movq %r11, 3*8(%rdi)
19831 leaq 4*8(%rdi), %rdi
19832 jae .Lcopy_forward_loop
19833@@ -110,12 +112,12 @@ ENTRY(memcpy)
19834 subq $0x20, %rdx
19835 movq -1*8(%rsi), %r8
19836 movq -2*8(%rsi), %r9
19837- movq -3*8(%rsi), %r10
19838+ movq -3*8(%rsi), %rcx
19839 movq -4*8(%rsi), %r11
19840 leaq -4*8(%rsi), %rsi
19841 movq %r8, -1*8(%rdi)
19842 movq %r9, -2*8(%rdi)
19843- movq %r10, -3*8(%rdi)
19844+ movq %rcx, -3*8(%rdi)
19845 movq %r11, -4*8(%rdi)
19846 leaq -4*8(%rdi), %rdi
19847 jae .Lcopy_backward_loop
19848@@ -135,12 +137,13 @@ ENTRY(memcpy)
19849 */
19850 movq 0*8(%rsi), %r8
19851 movq 1*8(%rsi), %r9
19852- movq -2*8(%rsi, %rdx), %r10
19853+ movq -2*8(%rsi, %rdx), %rcx
19854 movq -1*8(%rsi, %rdx), %r11
19855 movq %r8, 0*8(%rdi)
19856 movq %r9, 1*8(%rdi)
19857- movq %r10, -2*8(%rdi, %rdx)
19858+ movq %rcx, -2*8(%rdi, %rdx)
19859 movq %r11, -1*8(%rdi, %rdx)
19860+ pax_force_retaddr
19861 retq
19862 .p2align 4
19863 .Lless_16bytes:
19864@@ -153,6 +156,7 @@ ENTRY(memcpy)
19865 movq -1*8(%rsi, %rdx), %r9
19866 movq %r8, 0*8(%rdi)
19867 movq %r9, -1*8(%rdi, %rdx)
19868+ pax_force_retaddr
19869 retq
19870 .p2align 4
19871 .Lless_8bytes:
19872@@ -166,6 +170,7 @@ ENTRY(memcpy)
19873 movl -4(%rsi, %rdx), %r8d
19874 movl %ecx, (%rdi)
19875 movl %r8d, -4(%rdi, %rdx)
19876+ pax_force_retaddr
19877 retq
19878 .p2align 4
19879 .Lless_3bytes:
19880@@ -183,6 +188,7 @@ ENTRY(memcpy)
19881 jnz .Lloop_1
19882
19883 .Lend:
19884+ pax_force_retaddr
19885 retq
19886 CFI_ENDPROC
19887 ENDPROC(memcpy)
19888diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19889index ee16461..c39c199 100644
19890--- a/arch/x86/lib/memmove_64.S
19891+++ b/arch/x86/lib/memmove_64.S
19892@@ -61,13 +61,13 @@ ENTRY(memmove)
19893 5:
19894 sub $0x20, %rdx
19895 movq 0*8(%rsi), %r11
19896- movq 1*8(%rsi), %r10
19897+ movq 1*8(%rsi), %rcx
19898 movq 2*8(%rsi), %r9
19899 movq 3*8(%rsi), %r8
19900 leaq 4*8(%rsi), %rsi
19901
19902 movq %r11, 0*8(%rdi)
19903- movq %r10, 1*8(%rdi)
19904+ movq %rcx, 1*8(%rdi)
19905 movq %r9, 2*8(%rdi)
19906 movq %r8, 3*8(%rdi)
19907 leaq 4*8(%rdi), %rdi
19908@@ -81,10 +81,10 @@ ENTRY(memmove)
19909 4:
19910 movq %rdx, %rcx
19911 movq -8(%rsi, %rdx), %r11
19912- lea -8(%rdi, %rdx), %r10
19913+ lea -8(%rdi, %rdx), %r9
19914 shrq $3, %rcx
19915 rep movsq
19916- movq %r11, (%r10)
19917+ movq %r11, (%r9)
19918 jmp 13f
19919 .Lmemmove_end_forward:
19920
19921@@ -95,14 +95,14 @@ ENTRY(memmove)
19922 7:
19923 movq %rdx, %rcx
19924 movq (%rsi), %r11
19925- movq %rdi, %r10
19926+ movq %rdi, %r9
19927 leaq -8(%rsi, %rdx), %rsi
19928 leaq -8(%rdi, %rdx), %rdi
19929 shrq $3, %rcx
19930 std
19931 rep movsq
19932 cld
19933- movq %r11, (%r10)
19934+ movq %r11, (%r9)
19935 jmp 13f
19936
19937 /*
19938@@ -127,13 +127,13 @@ ENTRY(memmove)
19939 8:
19940 subq $0x20, %rdx
19941 movq -1*8(%rsi), %r11
19942- movq -2*8(%rsi), %r10
19943+ movq -2*8(%rsi), %rcx
19944 movq -3*8(%rsi), %r9
19945 movq -4*8(%rsi), %r8
19946 leaq -4*8(%rsi), %rsi
19947
19948 movq %r11, -1*8(%rdi)
19949- movq %r10, -2*8(%rdi)
19950+ movq %rcx, -2*8(%rdi)
19951 movq %r9, -3*8(%rdi)
19952 movq %r8, -4*8(%rdi)
19953 leaq -4*8(%rdi), %rdi
19954@@ -151,11 +151,11 @@ ENTRY(memmove)
19955 * Move data from 16 bytes to 31 bytes.
19956 */
19957 movq 0*8(%rsi), %r11
19958- movq 1*8(%rsi), %r10
19959+ movq 1*8(%rsi), %rcx
19960 movq -2*8(%rsi, %rdx), %r9
19961 movq -1*8(%rsi, %rdx), %r8
19962 movq %r11, 0*8(%rdi)
19963- movq %r10, 1*8(%rdi)
19964+ movq %rcx, 1*8(%rdi)
19965 movq %r9, -2*8(%rdi, %rdx)
19966 movq %r8, -1*8(%rdi, %rdx)
19967 jmp 13f
19968@@ -167,9 +167,9 @@ ENTRY(memmove)
19969 * Move data from 8 bytes to 15 bytes.
19970 */
19971 movq 0*8(%rsi), %r11
19972- movq -1*8(%rsi, %rdx), %r10
19973+ movq -1*8(%rsi, %rdx), %r9
19974 movq %r11, 0*8(%rdi)
19975- movq %r10, -1*8(%rdi, %rdx)
19976+ movq %r9, -1*8(%rdi, %rdx)
19977 jmp 13f
19978 10:
19979 cmpq $4, %rdx
19980@@ -178,9 +178,9 @@ ENTRY(memmove)
19981 * Move data from 4 bytes to 7 bytes.
19982 */
19983 movl (%rsi), %r11d
19984- movl -4(%rsi, %rdx), %r10d
19985+ movl -4(%rsi, %rdx), %r9d
19986 movl %r11d, (%rdi)
19987- movl %r10d, -4(%rdi, %rdx)
19988+ movl %r9d, -4(%rdi, %rdx)
19989 jmp 13f
19990 11:
19991 cmp $2, %rdx
19992@@ -189,9 +189,9 @@ ENTRY(memmove)
19993 * Move data from 2 bytes to 3 bytes.
19994 */
19995 movw (%rsi), %r11w
19996- movw -2(%rsi, %rdx), %r10w
19997+ movw -2(%rsi, %rdx), %r9w
19998 movw %r11w, (%rdi)
19999- movw %r10w, -2(%rdi, %rdx)
20000+ movw %r9w, -2(%rdi, %rdx)
20001 jmp 13f
20002 12:
20003 cmp $1, %rdx
20004@@ -202,6 +202,7 @@ ENTRY(memmove)
20005 movb (%rsi), %r11b
20006 movb %r11b, (%rdi)
20007 13:
20008+ pax_force_retaddr
20009 retq
20010 CFI_ENDPROC
20011
20012@@ -210,6 +211,7 @@ ENTRY(memmove)
20013 /* Forward moving data. */
20014 movq %rdx, %rcx
20015 rep movsb
20016+ pax_force_retaddr
20017 retq
20018 .Lmemmove_end_forward_efs:
20019 .previous
20020diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20021index 79bd454..dff325a 100644
20022--- a/arch/x86/lib/memset_64.S
20023+++ b/arch/x86/lib/memset_64.S
20024@@ -31,6 +31,7 @@
20025 movl %r8d,%ecx
20026 rep stosb
20027 movq %r9,%rax
20028+ pax_force_retaddr
20029 ret
20030 .Lmemset_e:
20031 .previous
20032@@ -53,6 +54,7 @@
20033 movl %edx,%ecx
20034 rep stosb
20035 movq %r9,%rax
20036+ pax_force_retaddr
20037 ret
20038 .Lmemset_e_e:
20039 .previous
20040@@ -60,13 +62,13 @@
20041 ENTRY(memset)
20042 ENTRY(__memset)
20043 CFI_STARTPROC
20044- movq %rdi,%r10
20045 movq %rdx,%r11
20046
20047 /* expand byte value */
20048 movzbl %sil,%ecx
20049 movabs $0x0101010101010101,%rax
20050 mul %rcx /* with rax, clobbers rdx */
20051+ movq %rdi,%rdx
20052
20053 /* align dst */
20054 movl %edi,%r9d
20055@@ -120,7 +122,8 @@ ENTRY(__memset)
20056 jnz .Lloop_1
20057
20058 .Lende:
20059- movq %r10,%rax
20060+ movq %rdx,%rax
20061+ pax_force_retaddr
20062 ret
20063
20064 CFI_RESTORE_STATE
20065diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20066index c9f2d9b..e7fd2c0 100644
20067--- a/arch/x86/lib/mmx_32.c
20068+++ b/arch/x86/lib/mmx_32.c
20069@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20070 {
20071 void *p;
20072 int i;
20073+ unsigned long cr0;
20074
20075 if (unlikely(in_interrupt()))
20076 return __memcpy(to, from, len);
20077@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20078 kernel_fpu_begin();
20079
20080 __asm__ __volatile__ (
20081- "1: prefetch (%0)\n" /* This set is 28 bytes */
20082- " prefetch 64(%0)\n"
20083- " prefetch 128(%0)\n"
20084- " prefetch 192(%0)\n"
20085- " prefetch 256(%0)\n"
20086+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20087+ " prefetch 64(%1)\n"
20088+ " prefetch 128(%1)\n"
20089+ " prefetch 192(%1)\n"
20090+ " prefetch 256(%1)\n"
20091 "2: \n"
20092 ".section .fixup, \"ax\"\n"
20093- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20094+ "3: \n"
20095+
20096+#ifdef CONFIG_PAX_KERNEXEC
20097+ " movl %%cr0, %0\n"
20098+ " movl %0, %%eax\n"
20099+ " andl $0xFFFEFFFF, %%eax\n"
20100+ " movl %%eax, %%cr0\n"
20101+#endif
20102+
20103+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20104+
20105+#ifdef CONFIG_PAX_KERNEXEC
20106+ " movl %0, %%cr0\n"
20107+#endif
20108+
20109 " jmp 2b\n"
20110 ".previous\n"
20111 _ASM_EXTABLE(1b, 3b)
20112- : : "r" (from));
20113+ : "=&r" (cr0) : "r" (from) : "ax");
20114
20115 for ( ; i > 5; i--) {
20116 __asm__ __volatile__ (
20117- "1: prefetch 320(%0)\n"
20118- "2: movq (%0), %%mm0\n"
20119- " movq 8(%0), %%mm1\n"
20120- " movq 16(%0), %%mm2\n"
20121- " movq 24(%0), %%mm3\n"
20122- " movq %%mm0, (%1)\n"
20123- " movq %%mm1, 8(%1)\n"
20124- " movq %%mm2, 16(%1)\n"
20125- " movq %%mm3, 24(%1)\n"
20126- " movq 32(%0), %%mm0\n"
20127- " movq 40(%0), %%mm1\n"
20128- " movq 48(%0), %%mm2\n"
20129- " movq 56(%0), %%mm3\n"
20130- " movq %%mm0, 32(%1)\n"
20131- " movq %%mm1, 40(%1)\n"
20132- " movq %%mm2, 48(%1)\n"
20133- " movq %%mm3, 56(%1)\n"
20134+ "1: prefetch 320(%1)\n"
20135+ "2: movq (%1), %%mm0\n"
20136+ " movq 8(%1), %%mm1\n"
20137+ " movq 16(%1), %%mm2\n"
20138+ " movq 24(%1), %%mm3\n"
20139+ " movq %%mm0, (%2)\n"
20140+ " movq %%mm1, 8(%2)\n"
20141+ " movq %%mm2, 16(%2)\n"
20142+ " movq %%mm3, 24(%2)\n"
20143+ " movq 32(%1), %%mm0\n"
20144+ " movq 40(%1), %%mm1\n"
20145+ " movq 48(%1), %%mm2\n"
20146+ " movq 56(%1), %%mm3\n"
20147+ " movq %%mm0, 32(%2)\n"
20148+ " movq %%mm1, 40(%2)\n"
20149+ " movq %%mm2, 48(%2)\n"
20150+ " movq %%mm3, 56(%2)\n"
20151 ".section .fixup, \"ax\"\n"
20152- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20153+ "3:\n"
20154+
20155+#ifdef CONFIG_PAX_KERNEXEC
20156+ " movl %%cr0, %0\n"
20157+ " movl %0, %%eax\n"
20158+ " andl $0xFFFEFFFF, %%eax\n"
20159+ " movl %%eax, %%cr0\n"
20160+#endif
20161+
20162+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20163+
20164+#ifdef CONFIG_PAX_KERNEXEC
20165+ " movl %0, %%cr0\n"
20166+#endif
20167+
20168 " jmp 2b\n"
20169 ".previous\n"
20170 _ASM_EXTABLE(1b, 3b)
20171- : : "r" (from), "r" (to) : "memory");
20172+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20173
20174 from += 64;
20175 to += 64;
20176@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20177 static void fast_copy_page(void *to, void *from)
20178 {
20179 int i;
20180+ unsigned long cr0;
20181
20182 kernel_fpu_begin();
20183
20184@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20185 * but that is for later. -AV
20186 */
20187 __asm__ __volatile__(
20188- "1: prefetch (%0)\n"
20189- " prefetch 64(%0)\n"
20190- " prefetch 128(%0)\n"
20191- " prefetch 192(%0)\n"
20192- " prefetch 256(%0)\n"
20193+ "1: prefetch (%1)\n"
20194+ " prefetch 64(%1)\n"
20195+ " prefetch 128(%1)\n"
20196+ " prefetch 192(%1)\n"
20197+ " prefetch 256(%1)\n"
20198 "2: \n"
20199 ".section .fixup, \"ax\"\n"
20200- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20201+ "3: \n"
20202+
20203+#ifdef CONFIG_PAX_KERNEXEC
20204+ " movl %%cr0, %0\n"
20205+ " movl %0, %%eax\n"
20206+ " andl $0xFFFEFFFF, %%eax\n"
20207+ " movl %%eax, %%cr0\n"
20208+#endif
20209+
20210+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20211+
20212+#ifdef CONFIG_PAX_KERNEXEC
20213+ " movl %0, %%cr0\n"
20214+#endif
20215+
20216 " jmp 2b\n"
20217 ".previous\n"
20218- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20219+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20220
20221 for (i = 0; i < (4096-320)/64; i++) {
20222 __asm__ __volatile__ (
20223- "1: prefetch 320(%0)\n"
20224- "2: movq (%0), %%mm0\n"
20225- " movntq %%mm0, (%1)\n"
20226- " movq 8(%0), %%mm1\n"
20227- " movntq %%mm1, 8(%1)\n"
20228- " movq 16(%0), %%mm2\n"
20229- " movntq %%mm2, 16(%1)\n"
20230- " movq 24(%0), %%mm3\n"
20231- " movntq %%mm3, 24(%1)\n"
20232- " movq 32(%0), %%mm4\n"
20233- " movntq %%mm4, 32(%1)\n"
20234- " movq 40(%0), %%mm5\n"
20235- " movntq %%mm5, 40(%1)\n"
20236- " movq 48(%0), %%mm6\n"
20237- " movntq %%mm6, 48(%1)\n"
20238- " movq 56(%0), %%mm7\n"
20239- " movntq %%mm7, 56(%1)\n"
20240+ "1: prefetch 320(%1)\n"
20241+ "2: movq (%1), %%mm0\n"
20242+ " movntq %%mm0, (%2)\n"
20243+ " movq 8(%1), %%mm1\n"
20244+ " movntq %%mm1, 8(%2)\n"
20245+ " movq 16(%1), %%mm2\n"
20246+ " movntq %%mm2, 16(%2)\n"
20247+ " movq 24(%1), %%mm3\n"
20248+ " movntq %%mm3, 24(%2)\n"
20249+ " movq 32(%1), %%mm4\n"
20250+ " movntq %%mm4, 32(%2)\n"
20251+ " movq 40(%1), %%mm5\n"
20252+ " movntq %%mm5, 40(%2)\n"
20253+ " movq 48(%1), %%mm6\n"
20254+ " movntq %%mm6, 48(%2)\n"
20255+ " movq 56(%1), %%mm7\n"
20256+ " movntq %%mm7, 56(%2)\n"
20257 ".section .fixup, \"ax\"\n"
20258- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20259+ "3:\n"
20260+
20261+#ifdef CONFIG_PAX_KERNEXEC
20262+ " movl %%cr0, %0\n"
20263+ " movl %0, %%eax\n"
20264+ " andl $0xFFFEFFFF, %%eax\n"
20265+ " movl %%eax, %%cr0\n"
20266+#endif
20267+
20268+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20269+
20270+#ifdef CONFIG_PAX_KERNEXEC
20271+ " movl %0, %%cr0\n"
20272+#endif
20273+
20274 " jmp 2b\n"
20275 ".previous\n"
20276- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20277+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20278
20279 from += 64;
20280 to += 64;
20281@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20282 static void fast_copy_page(void *to, void *from)
20283 {
20284 int i;
20285+ unsigned long cr0;
20286
20287 kernel_fpu_begin();
20288
20289 __asm__ __volatile__ (
20290- "1: prefetch (%0)\n"
20291- " prefetch 64(%0)\n"
20292- " prefetch 128(%0)\n"
20293- " prefetch 192(%0)\n"
20294- " prefetch 256(%0)\n"
20295+ "1: prefetch (%1)\n"
20296+ " prefetch 64(%1)\n"
20297+ " prefetch 128(%1)\n"
20298+ " prefetch 192(%1)\n"
20299+ " prefetch 256(%1)\n"
20300 "2: \n"
20301 ".section .fixup, \"ax\"\n"
20302- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20303+ "3: \n"
20304+
20305+#ifdef CONFIG_PAX_KERNEXEC
20306+ " movl %%cr0, %0\n"
20307+ " movl %0, %%eax\n"
20308+ " andl $0xFFFEFFFF, %%eax\n"
20309+ " movl %%eax, %%cr0\n"
20310+#endif
20311+
20312+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20313+
20314+#ifdef CONFIG_PAX_KERNEXEC
20315+ " movl %0, %%cr0\n"
20316+#endif
20317+
20318 " jmp 2b\n"
20319 ".previous\n"
20320- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20321+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20322
20323 for (i = 0; i < 4096/64; i++) {
20324 __asm__ __volatile__ (
20325- "1: prefetch 320(%0)\n"
20326- "2: movq (%0), %%mm0\n"
20327- " movq 8(%0), %%mm1\n"
20328- " movq 16(%0), %%mm2\n"
20329- " movq 24(%0), %%mm3\n"
20330- " movq %%mm0, (%1)\n"
20331- " movq %%mm1, 8(%1)\n"
20332- " movq %%mm2, 16(%1)\n"
20333- " movq %%mm3, 24(%1)\n"
20334- " movq 32(%0), %%mm0\n"
20335- " movq 40(%0), %%mm1\n"
20336- " movq 48(%0), %%mm2\n"
20337- " movq 56(%0), %%mm3\n"
20338- " movq %%mm0, 32(%1)\n"
20339- " movq %%mm1, 40(%1)\n"
20340- " movq %%mm2, 48(%1)\n"
20341- " movq %%mm3, 56(%1)\n"
20342+ "1: prefetch 320(%1)\n"
20343+ "2: movq (%1), %%mm0\n"
20344+ " movq 8(%1), %%mm1\n"
20345+ " movq 16(%1), %%mm2\n"
20346+ " movq 24(%1), %%mm3\n"
20347+ " movq %%mm0, (%2)\n"
20348+ " movq %%mm1, 8(%2)\n"
20349+ " movq %%mm2, 16(%2)\n"
20350+ " movq %%mm3, 24(%2)\n"
20351+ " movq 32(%1), %%mm0\n"
20352+ " movq 40(%1), %%mm1\n"
20353+ " movq 48(%1), %%mm2\n"
20354+ " movq 56(%1), %%mm3\n"
20355+ " movq %%mm0, 32(%2)\n"
20356+ " movq %%mm1, 40(%2)\n"
20357+ " movq %%mm2, 48(%2)\n"
20358+ " movq %%mm3, 56(%2)\n"
20359 ".section .fixup, \"ax\"\n"
20360- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20361+ "3:\n"
20362+
20363+#ifdef CONFIG_PAX_KERNEXEC
20364+ " movl %%cr0, %0\n"
20365+ " movl %0, %%eax\n"
20366+ " andl $0xFFFEFFFF, %%eax\n"
20367+ " movl %%eax, %%cr0\n"
20368+#endif
20369+
20370+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20371+
20372+#ifdef CONFIG_PAX_KERNEXEC
20373+ " movl %0, %%cr0\n"
20374+#endif
20375+
20376 " jmp 2b\n"
20377 ".previous\n"
20378 _ASM_EXTABLE(1b, 3b)
20379- : : "r" (from), "r" (to) : "memory");
20380+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20381
20382 from += 64;
20383 to += 64;
20384diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20385index 69fa106..adda88b 100644
20386--- a/arch/x86/lib/msr-reg.S
20387+++ b/arch/x86/lib/msr-reg.S
20388@@ -3,6 +3,7 @@
20389 #include <asm/dwarf2.h>
20390 #include <asm/asm.h>
20391 #include <asm/msr.h>
20392+#include <asm/alternative-asm.h>
20393
20394 #ifdef CONFIG_X86_64
20395 /*
20396@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20397 CFI_STARTPROC
20398 pushq_cfi %rbx
20399 pushq_cfi %rbp
20400- movq %rdi, %r10 /* Save pointer */
20401+ movq %rdi, %r9 /* Save pointer */
20402 xorl %r11d, %r11d /* Return value */
20403 movl (%rdi), %eax
20404 movl 4(%rdi), %ecx
20405@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20406 movl 28(%rdi), %edi
20407 CFI_REMEMBER_STATE
20408 1: \op
20409-2: movl %eax, (%r10)
20410+2: movl %eax, (%r9)
20411 movl %r11d, %eax /* Return value */
20412- movl %ecx, 4(%r10)
20413- movl %edx, 8(%r10)
20414- movl %ebx, 12(%r10)
20415- movl %ebp, 20(%r10)
20416- movl %esi, 24(%r10)
20417- movl %edi, 28(%r10)
20418+ movl %ecx, 4(%r9)
20419+ movl %edx, 8(%r9)
20420+ movl %ebx, 12(%r9)
20421+ movl %ebp, 20(%r9)
20422+ movl %esi, 24(%r9)
20423+ movl %edi, 28(%r9)
20424 popq_cfi %rbp
20425 popq_cfi %rbx
20426+ pax_force_retaddr
20427 ret
20428 3:
20429 CFI_RESTORE_STATE
20430diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20431index 36b0d15..d381858 100644
20432--- a/arch/x86/lib/putuser.S
20433+++ b/arch/x86/lib/putuser.S
20434@@ -15,7 +15,9 @@
20435 #include <asm/thread_info.h>
20436 #include <asm/errno.h>
20437 #include <asm/asm.h>
20438-
20439+#include <asm/segment.h>
20440+#include <asm/pgtable.h>
20441+#include <asm/alternative-asm.h>
20442
20443 /*
20444 * __put_user_X
20445@@ -29,52 +31,119 @@
20446 * as they get called from within inline assembly.
20447 */
20448
20449-#define ENTER CFI_STARTPROC ; \
20450- GET_THREAD_INFO(%_ASM_BX)
20451-#define EXIT ret ; \
20452+#define ENTER CFI_STARTPROC
20453+#define EXIT pax_force_retaddr; ret ; \
20454 CFI_ENDPROC
20455
20456+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20457+#define _DEST %_ASM_CX,%_ASM_BX
20458+#else
20459+#define _DEST %_ASM_CX
20460+#endif
20461+
20462+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20463+#define __copyuser_seg gs;
20464+#else
20465+#define __copyuser_seg
20466+#endif
20467+
20468 .text
20469 ENTRY(__put_user_1)
20470 ENTER
20471+
20472+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20473+ GET_THREAD_INFO(%_ASM_BX)
20474 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20475 jae bad_put_user
20476-1: movb %al,(%_ASM_CX)
20477+
20478+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20479+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20480+ cmp %_ASM_BX,%_ASM_CX
20481+ jb 1234f
20482+ xor %ebx,%ebx
20483+1234:
20484+#endif
20485+
20486+#endif
20487+
20488+1: __copyuser_seg movb %al,(_DEST)
20489 xor %eax,%eax
20490 EXIT
20491 ENDPROC(__put_user_1)
20492
20493 ENTRY(__put_user_2)
20494 ENTER
20495+
20496+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20497+ GET_THREAD_INFO(%_ASM_BX)
20498 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20499 sub $1,%_ASM_BX
20500 cmp %_ASM_BX,%_ASM_CX
20501 jae bad_put_user
20502-2: movw %ax,(%_ASM_CX)
20503+
20504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20505+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20506+ cmp %_ASM_BX,%_ASM_CX
20507+ jb 1234f
20508+ xor %ebx,%ebx
20509+1234:
20510+#endif
20511+
20512+#endif
20513+
20514+2: __copyuser_seg movw %ax,(_DEST)
20515 xor %eax,%eax
20516 EXIT
20517 ENDPROC(__put_user_2)
20518
20519 ENTRY(__put_user_4)
20520 ENTER
20521+
20522+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20523+ GET_THREAD_INFO(%_ASM_BX)
20524 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20525 sub $3,%_ASM_BX
20526 cmp %_ASM_BX,%_ASM_CX
20527 jae bad_put_user
20528-3: movl %eax,(%_ASM_CX)
20529+
20530+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20531+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20532+ cmp %_ASM_BX,%_ASM_CX
20533+ jb 1234f
20534+ xor %ebx,%ebx
20535+1234:
20536+#endif
20537+
20538+#endif
20539+
20540+3: __copyuser_seg movl %eax,(_DEST)
20541 xor %eax,%eax
20542 EXIT
20543 ENDPROC(__put_user_4)
20544
20545 ENTRY(__put_user_8)
20546 ENTER
20547+
20548+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20549+ GET_THREAD_INFO(%_ASM_BX)
20550 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20551 sub $7,%_ASM_BX
20552 cmp %_ASM_BX,%_ASM_CX
20553 jae bad_put_user
20554-4: mov %_ASM_AX,(%_ASM_CX)
20555+
20556+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20557+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20558+ cmp %_ASM_BX,%_ASM_CX
20559+ jb 1234f
20560+ xor %ebx,%ebx
20561+1234:
20562+#endif
20563+
20564+#endif
20565+
20566+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20567 #ifdef CONFIG_X86_32
20568-5: movl %edx,4(%_ASM_CX)
20569+5: __copyuser_seg movl %edx,4(_DEST)
20570 #endif
20571 xor %eax,%eax
20572 EXIT
20573diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20574index 1cad221..de671ee 100644
20575--- a/arch/x86/lib/rwlock.S
20576+++ b/arch/x86/lib/rwlock.S
20577@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20578 FRAME
20579 0: LOCK_PREFIX
20580 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20581+
20582+#ifdef CONFIG_PAX_REFCOUNT
20583+ jno 1234f
20584+ LOCK_PREFIX
20585+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20586+ int $4
20587+1234:
20588+ _ASM_EXTABLE(1234b, 1234b)
20589+#endif
20590+
20591 1: rep; nop
20592 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20593 jne 1b
20594 LOCK_PREFIX
20595 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20596+
20597+#ifdef CONFIG_PAX_REFCOUNT
20598+ jno 1234f
20599+ LOCK_PREFIX
20600+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20601+ int $4
20602+1234:
20603+ _ASM_EXTABLE(1234b, 1234b)
20604+#endif
20605+
20606 jnz 0b
20607 ENDFRAME
20608+ pax_force_retaddr
20609 ret
20610 CFI_ENDPROC
20611 END(__write_lock_failed)
20612@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20613 FRAME
20614 0: LOCK_PREFIX
20615 READ_LOCK_SIZE(inc) (%__lock_ptr)
20616+
20617+#ifdef CONFIG_PAX_REFCOUNT
20618+ jno 1234f
20619+ LOCK_PREFIX
20620+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20621+ int $4
20622+1234:
20623+ _ASM_EXTABLE(1234b, 1234b)
20624+#endif
20625+
20626 1: rep; nop
20627 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20628 js 1b
20629 LOCK_PREFIX
20630 READ_LOCK_SIZE(dec) (%__lock_ptr)
20631+
20632+#ifdef CONFIG_PAX_REFCOUNT
20633+ jno 1234f
20634+ LOCK_PREFIX
20635+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20636+ int $4
20637+1234:
20638+ _ASM_EXTABLE(1234b, 1234b)
20639+#endif
20640+
20641 js 0b
20642 ENDFRAME
20643+ pax_force_retaddr
20644 ret
20645 CFI_ENDPROC
20646 END(__read_lock_failed)
20647diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20648index 5dff5f0..cadebf4 100644
20649--- a/arch/x86/lib/rwsem.S
20650+++ b/arch/x86/lib/rwsem.S
20651@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20652 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20653 CFI_RESTORE __ASM_REG(dx)
20654 restore_common_regs
20655+ pax_force_retaddr
20656 ret
20657 CFI_ENDPROC
20658 ENDPROC(call_rwsem_down_read_failed)
20659@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20660 movq %rax,%rdi
20661 call rwsem_down_write_failed
20662 restore_common_regs
20663+ pax_force_retaddr
20664 ret
20665 CFI_ENDPROC
20666 ENDPROC(call_rwsem_down_write_failed)
20667@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20668 movq %rax,%rdi
20669 call rwsem_wake
20670 restore_common_regs
20671-1: ret
20672+1: pax_force_retaddr
20673+ ret
20674 CFI_ENDPROC
20675 ENDPROC(call_rwsem_wake)
20676
20677@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20678 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20679 CFI_RESTORE __ASM_REG(dx)
20680 restore_common_regs
20681+ pax_force_retaddr
20682 ret
20683 CFI_ENDPROC
20684 ENDPROC(call_rwsem_downgrade_wake)
20685diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20686index a63efd6..ccecad8 100644
20687--- a/arch/x86/lib/thunk_64.S
20688+++ b/arch/x86/lib/thunk_64.S
20689@@ -8,6 +8,7 @@
20690 #include <linux/linkage.h>
20691 #include <asm/dwarf2.h>
20692 #include <asm/calling.h>
20693+#include <asm/alternative-asm.h>
20694
20695 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20696 .macro THUNK name, func, put_ret_addr_in_rdi=0
20697@@ -41,5 +42,6 @@
20698 SAVE_ARGS
20699 restore:
20700 RESTORE_ARGS
20701+ pax_force_retaddr
20702 ret
20703 CFI_ENDPROC
20704diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20705index e218d5d..35679b4 100644
20706--- a/arch/x86/lib/usercopy_32.c
20707+++ b/arch/x86/lib/usercopy_32.c
20708@@ -43,7 +43,7 @@ do { \
20709 __asm__ __volatile__( \
20710 " testl %1,%1\n" \
20711 " jz 2f\n" \
20712- "0: lodsb\n" \
20713+ "0: "__copyuser_seg"lodsb\n" \
20714 " stosb\n" \
20715 " testb %%al,%%al\n" \
20716 " jz 1f\n" \
20717@@ -128,10 +128,12 @@ do { \
20718 int __d0; \
20719 might_fault(); \
20720 __asm__ __volatile__( \
20721+ __COPYUSER_SET_ES \
20722 "0: rep; stosl\n" \
20723 " movl %2,%0\n" \
20724 "1: rep; stosb\n" \
20725 "2:\n" \
20726+ __COPYUSER_RESTORE_ES \
20727 ".section .fixup,\"ax\"\n" \
20728 "3: lea 0(%2,%0,4),%0\n" \
20729 " jmp 2b\n" \
20730@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20731 might_fault();
20732
20733 __asm__ __volatile__(
20734+ __COPYUSER_SET_ES
20735 " testl %0, %0\n"
20736 " jz 3f\n"
20737 " andl %0,%%ecx\n"
20738@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20739 " subl %%ecx,%0\n"
20740 " addl %0,%%eax\n"
20741 "1:\n"
20742+ __COPYUSER_RESTORE_ES
20743 ".section .fixup,\"ax\"\n"
20744 "2: xorl %%eax,%%eax\n"
20745 " jmp 1b\n"
20746@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20747
20748 #ifdef CONFIG_X86_INTEL_USERCOPY
20749 static unsigned long
20750-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20751+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20752 {
20753 int d0, d1;
20754 __asm__ __volatile__(
20755@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20756 " .align 2,0x90\n"
20757 "3: movl 0(%4), %%eax\n"
20758 "4: movl 4(%4), %%edx\n"
20759- "5: movl %%eax, 0(%3)\n"
20760- "6: movl %%edx, 4(%3)\n"
20761+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20762+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20763 "7: movl 8(%4), %%eax\n"
20764 "8: movl 12(%4),%%edx\n"
20765- "9: movl %%eax, 8(%3)\n"
20766- "10: movl %%edx, 12(%3)\n"
20767+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20768+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20769 "11: movl 16(%4), %%eax\n"
20770 "12: movl 20(%4), %%edx\n"
20771- "13: movl %%eax, 16(%3)\n"
20772- "14: movl %%edx, 20(%3)\n"
20773+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20774+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20775 "15: movl 24(%4), %%eax\n"
20776 "16: movl 28(%4), %%edx\n"
20777- "17: movl %%eax, 24(%3)\n"
20778- "18: movl %%edx, 28(%3)\n"
20779+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20780+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20781 "19: movl 32(%4), %%eax\n"
20782 "20: movl 36(%4), %%edx\n"
20783- "21: movl %%eax, 32(%3)\n"
20784- "22: movl %%edx, 36(%3)\n"
20785+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20786+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20787 "23: movl 40(%4), %%eax\n"
20788 "24: movl 44(%4), %%edx\n"
20789- "25: movl %%eax, 40(%3)\n"
20790- "26: movl %%edx, 44(%3)\n"
20791+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20792+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20793 "27: movl 48(%4), %%eax\n"
20794 "28: movl 52(%4), %%edx\n"
20795- "29: movl %%eax, 48(%3)\n"
20796- "30: movl %%edx, 52(%3)\n"
20797+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20798+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20799 "31: movl 56(%4), %%eax\n"
20800 "32: movl 60(%4), %%edx\n"
20801- "33: movl %%eax, 56(%3)\n"
20802- "34: movl %%edx, 60(%3)\n"
20803+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20804+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20805 " addl $-64, %0\n"
20806 " addl $64, %4\n"
20807 " addl $64, %3\n"
20808@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20809 " shrl $2, %0\n"
20810 " andl $3, %%eax\n"
20811 " cld\n"
20812+ __COPYUSER_SET_ES
20813 "99: rep; movsl\n"
20814 "36: movl %%eax, %0\n"
20815 "37: rep; movsb\n"
20816 "100:\n"
20817+ __COPYUSER_RESTORE_ES
20818+ ".section .fixup,\"ax\"\n"
20819+ "101: lea 0(%%eax,%0,4),%0\n"
20820+ " jmp 100b\n"
20821+ ".previous\n"
20822+ ".section __ex_table,\"a\"\n"
20823+ " .align 4\n"
20824+ " .long 1b,100b\n"
20825+ " .long 2b,100b\n"
20826+ " .long 3b,100b\n"
20827+ " .long 4b,100b\n"
20828+ " .long 5b,100b\n"
20829+ " .long 6b,100b\n"
20830+ " .long 7b,100b\n"
20831+ " .long 8b,100b\n"
20832+ " .long 9b,100b\n"
20833+ " .long 10b,100b\n"
20834+ " .long 11b,100b\n"
20835+ " .long 12b,100b\n"
20836+ " .long 13b,100b\n"
20837+ " .long 14b,100b\n"
20838+ " .long 15b,100b\n"
20839+ " .long 16b,100b\n"
20840+ " .long 17b,100b\n"
20841+ " .long 18b,100b\n"
20842+ " .long 19b,100b\n"
20843+ " .long 20b,100b\n"
20844+ " .long 21b,100b\n"
20845+ " .long 22b,100b\n"
20846+ " .long 23b,100b\n"
20847+ " .long 24b,100b\n"
20848+ " .long 25b,100b\n"
20849+ " .long 26b,100b\n"
20850+ " .long 27b,100b\n"
20851+ " .long 28b,100b\n"
20852+ " .long 29b,100b\n"
20853+ " .long 30b,100b\n"
20854+ " .long 31b,100b\n"
20855+ " .long 32b,100b\n"
20856+ " .long 33b,100b\n"
20857+ " .long 34b,100b\n"
20858+ " .long 35b,100b\n"
20859+ " .long 36b,100b\n"
20860+ " .long 37b,100b\n"
20861+ " .long 99b,101b\n"
20862+ ".previous"
20863+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20864+ : "1"(to), "2"(from), "0"(size)
20865+ : "eax", "edx", "memory");
20866+ return size;
20867+}
20868+
20869+static unsigned long
20870+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20871+{
20872+ int d0, d1;
20873+ __asm__ __volatile__(
20874+ " .align 2,0x90\n"
20875+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20876+ " cmpl $67, %0\n"
20877+ " jbe 3f\n"
20878+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20879+ " .align 2,0x90\n"
20880+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20881+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20882+ "5: movl %%eax, 0(%3)\n"
20883+ "6: movl %%edx, 4(%3)\n"
20884+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20885+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20886+ "9: movl %%eax, 8(%3)\n"
20887+ "10: movl %%edx, 12(%3)\n"
20888+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20889+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20890+ "13: movl %%eax, 16(%3)\n"
20891+ "14: movl %%edx, 20(%3)\n"
20892+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20893+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20894+ "17: movl %%eax, 24(%3)\n"
20895+ "18: movl %%edx, 28(%3)\n"
20896+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20897+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20898+ "21: movl %%eax, 32(%3)\n"
20899+ "22: movl %%edx, 36(%3)\n"
20900+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20901+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20902+ "25: movl %%eax, 40(%3)\n"
20903+ "26: movl %%edx, 44(%3)\n"
20904+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20905+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20906+ "29: movl %%eax, 48(%3)\n"
20907+ "30: movl %%edx, 52(%3)\n"
20908+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20909+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20910+ "33: movl %%eax, 56(%3)\n"
20911+ "34: movl %%edx, 60(%3)\n"
20912+ " addl $-64, %0\n"
20913+ " addl $64, %4\n"
20914+ " addl $64, %3\n"
20915+ " cmpl $63, %0\n"
20916+ " ja 1b\n"
20917+ "35: movl %0, %%eax\n"
20918+ " shrl $2, %0\n"
20919+ " andl $3, %%eax\n"
20920+ " cld\n"
20921+ "99: rep; "__copyuser_seg" movsl\n"
20922+ "36: movl %%eax, %0\n"
20923+ "37: rep; "__copyuser_seg" movsb\n"
20924+ "100:\n"
20925 ".section .fixup,\"ax\"\n"
20926 "101: lea 0(%%eax,%0,4),%0\n"
20927 " jmp 100b\n"
20928@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20929 int d0, d1;
20930 __asm__ __volatile__(
20931 " .align 2,0x90\n"
20932- "0: movl 32(%4), %%eax\n"
20933+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20934 " cmpl $67, %0\n"
20935 " jbe 2f\n"
20936- "1: movl 64(%4), %%eax\n"
20937+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20938 " .align 2,0x90\n"
20939- "2: movl 0(%4), %%eax\n"
20940- "21: movl 4(%4), %%edx\n"
20941+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20942+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20943 " movl %%eax, 0(%3)\n"
20944 " movl %%edx, 4(%3)\n"
20945- "3: movl 8(%4), %%eax\n"
20946- "31: movl 12(%4),%%edx\n"
20947+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20948+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20949 " movl %%eax, 8(%3)\n"
20950 " movl %%edx, 12(%3)\n"
20951- "4: movl 16(%4), %%eax\n"
20952- "41: movl 20(%4), %%edx\n"
20953+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20954+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20955 " movl %%eax, 16(%3)\n"
20956 " movl %%edx, 20(%3)\n"
20957- "10: movl 24(%4), %%eax\n"
20958- "51: movl 28(%4), %%edx\n"
20959+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20960+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20961 " movl %%eax, 24(%3)\n"
20962 " movl %%edx, 28(%3)\n"
20963- "11: movl 32(%4), %%eax\n"
20964- "61: movl 36(%4), %%edx\n"
20965+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20966+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20967 " movl %%eax, 32(%3)\n"
20968 " movl %%edx, 36(%3)\n"
20969- "12: movl 40(%4), %%eax\n"
20970- "71: movl 44(%4), %%edx\n"
20971+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20972+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20973 " movl %%eax, 40(%3)\n"
20974 " movl %%edx, 44(%3)\n"
20975- "13: movl 48(%4), %%eax\n"
20976- "81: movl 52(%4), %%edx\n"
20977+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20978+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20979 " movl %%eax, 48(%3)\n"
20980 " movl %%edx, 52(%3)\n"
20981- "14: movl 56(%4), %%eax\n"
20982- "91: movl 60(%4), %%edx\n"
20983+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20984+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20985 " movl %%eax, 56(%3)\n"
20986 " movl %%edx, 60(%3)\n"
20987 " addl $-64, %0\n"
20988@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20989 " shrl $2, %0\n"
20990 " andl $3, %%eax\n"
20991 " cld\n"
20992- "6: rep; movsl\n"
20993+ "6: rep; "__copyuser_seg" movsl\n"
20994 " movl %%eax,%0\n"
20995- "7: rep; movsb\n"
20996+ "7: rep; "__copyuser_seg" movsb\n"
20997 "8:\n"
20998 ".section .fixup,\"ax\"\n"
20999 "9: lea 0(%%eax,%0,4),%0\n"
21000@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21001
21002 __asm__ __volatile__(
21003 " .align 2,0x90\n"
21004- "0: movl 32(%4), %%eax\n"
21005+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21006 " cmpl $67, %0\n"
21007 " jbe 2f\n"
21008- "1: movl 64(%4), %%eax\n"
21009+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21010 " .align 2,0x90\n"
21011- "2: movl 0(%4), %%eax\n"
21012- "21: movl 4(%4), %%edx\n"
21013+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21014+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21015 " movnti %%eax, 0(%3)\n"
21016 " movnti %%edx, 4(%3)\n"
21017- "3: movl 8(%4), %%eax\n"
21018- "31: movl 12(%4),%%edx\n"
21019+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21020+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21021 " movnti %%eax, 8(%3)\n"
21022 " movnti %%edx, 12(%3)\n"
21023- "4: movl 16(%4), %%eax\n"
21024- "41: movl 20(%4), %%edx\n"
21025+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21026+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21027 " movnti %%eax, 16(%3)\n"
21028 " movnti %%edx, 20(%3)\n"
21029- "10: movl 24(%4), %%eax\n"
21030- "51: movl 28(%4), %%edx\n"
21031+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21032+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21033 " movnti %%eax, 24(%3)\n"
21034 " movnti %%edx, 28(%3)\n"
21035- "11: movl 32(%4), %%eax\n"
21036- "61: movl 36(%4), %%edx\n"
21037+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21038+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21039 " movnti %%eax, 32(%3)\n"
21040 " movnti %%edx, 36(%3)\n"
21041- "12: movl 40(%4), %%eax\n"
21042- "71: movl 44(%4), %%edx\n"
21043+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21044+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21045 " movnti %%eax, 40(%3)\n"
21046 " movnti %%edx, 44(%3)\n"
21047- "13: movl 48(%4), %%eax\n"
21048- "81: movl 52(%4), %%edx\n"
21049+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21050+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21051 " movnti %%eax, 48(%3)\n"
21052 " movnti %%edx, 52(%3)\n"
21053- "14: movl 56(%4), %%eax\n"
21054- "91: movl 60(%4), %%edx\n"
21055+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21056+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21057 " movnti %%eax, 56(%3)\n"
21058 " movnti %%edx, 60(%3)\n"
21059 " addl $-64, %0\n"
21060@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21061 " shrl $2, %0\n"
21062 " andl $3, %%eax\n"
21063 " cld\n"
21064- "6: rep; movsl\n"
21065+ "6: rep; "__copyuser_seg" movsl\n"
21066 " movl %%eax,%0\n"
21067- "7: rep; movsb\n"
21068+ "7: rep; "__copyuser_seg" movsb\n"
21069 "8:\n"
21070 ".section .fixup,\"ax\"\n"
21071 "9: lea 0(%%eax,%0,4),%0\n"
21072@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21073
21074 __asm__ __volatile__(
21075 " .align 2,0x90\n"
21076- "0: movl 32(%4), %%eax\n"
21077+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21078 " cmpl $67, %0\n"
21079 " jbe 2f\n"
21080- "1: movl 64(%4), %%eax\n"
21081+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21082 " .align 2,0x90\n"
21083- "2: movl 0(%4), %%eax\n"
21084- "21: movl 4(%4), %%edx\n"
21085+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21086+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21087 " movnti %%eax, 0(%3)\n"
21088 " movnti %%edx, 4(%3)\n"
21089- "3: movl 8(%4), %%eax\n"
21090- "31: movl 12(%4),%%edx\n"
21091+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21092+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21093 " movnti %%eax, 8(%3)\n"
21094 " movnti %%edx, 12(%3)\n"
21095- "4: movl 16(%4), %%eax\n"
21096- "41: movl 20(%4), %%edx\n"
21097+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21098+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21099 " movnti %%eax, 16(%3)\n"
21100 " movnti %%edx, 20(%3)\n"
21101- "10: movl 24(%4), %%eax\n"
21102- "51: movl 28(%4), %%edx\n"
21103+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21104+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21105 " movnti %%eax, 24(%3)\n"
21106 " movnti %%edx, 28(%3)\n"
21107- "11: movl 32(%4), %%eax\n"
21108- "61: movl 36(%4), %%edx\n"
21109+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21110+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21111 " movnti %%eax, 32(%3)\n"
21112 " movnti %%edx, 36(%3)\n"
21113- "12: movl 40(%4), %%eax\n"
21114- "71: movl 44(%4), %%edx\n"
21115+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21116+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21117 " movnti %%eax, 40(%3)\n"
21118 " movnti %%edx, 44(%3)\n"
21119- "13: movl 48(%4), %%eax\n"
21120- "81: movl 52(%4), %%edx\n"
21121+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21122+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21123 " movnti %%eax, 48(%3)\n"
21124 " movnti %%edx, 52(%3)\n"
21125- "14: movl 56(%4), %%eax\n"
21126- "91: movl 60(%4), %%edx\n"
21127+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21128+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21129 " movnti %%eax, 56(%3)\n"
21130 " movnti %%edx, 60(%3)\n"
21131 " addl $-64, %0\n"
21132@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21133 " shrl $2, %0\n"
21134 " andl $3, %%eax\n"
21135 " cld\n"
21136- "6: rep; movsl\n"
21137+ "6: rep; "__copyuser_seg" movsl\n"
21138 " movl %%eax,%0\n"
21139- "7: rep; movsb\n"
21140+ "7: rep; "__copyuser_seg" movsb\n"
21141 "8:\n"
21142 ".section .fixup,\"ax\"\n"
21143 "9: lea 0(%%eax,%0,4),%0\n"
21144@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21145 */
21146 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21147 unsigned long size);
21148-unsigned long __copy_user_intel(void __user *to, const void *from,
21149+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21150+ unsigned long size);
21151+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21152 unsigned long size);
21153 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21154 const void __user *from, unsigned long size);
21155 #endif /* CONFIG_X86_INTEL_USERCOPY */
21156
21157 /* Generic arbitrary sized copy. */
21158-#define __copy_user(to, from, size) \
21159+#define __copy_user(to, from, size, prefix, set, restore) \
21160 do { \
21161 int __d0, __d1, __d2; \
21162 __asm__ __volatile__( \
21163+ set \
21164 " cmp $7,%0\n" \
21165 " jbe 1f\n" \
21166 " movl %1,%0\n" \
21167 " negl %0\n" \
21168 " andl $7,%0\n" \
21169 " subl %0,%3\n" \
21170- "4: rep; movsb\n" \
21171+ "4: rep; "prefix"movsb\n" \
21172 " movl %3,%0\n" \
21173 " shrl $2,%0\n" \
21174 " andl $3,%3\n" \
21175 " .align 2,0x90\n" \
21176- "0: rep; movsl\n" \
21177+ "0: rep; "prefix"movsl\n" \
21178 " movl %3,%0\n" \
21179- "1: rep; movsb\n" \
21180+ "1: rep; "prefix"movsb\n" \
21181 "2:\n" \
21182+ restore \
21183 ".section .fixup,\"ax\"\n" \
21184 "5: addl %3,%0\n" \
21185 " jmp 2b\n" \
21186@@ -682,14 +799,14 @@ do { \
21187 " negl %0\n" \
21188 " andl $7,%0\n" \
21189 " subl %0,%3\n" \
21190- "4: rep; movsb\n" \
21191+ "4: rep; "__copyuser_seg"movsb\n" \
21192 " movl %3,%0\n" \
21193 " shrl $2,%0\n" \
21194 " andl $3,%3\n" \
21195 " .align 2,0x90\n" \
21196- "0: rep; movsl\n" \
21197+ "0: rep; "__copyuser_seg"movsl\n" \
21198 " movl %3,%0\n" \
21199- "1: rep; movsb\n" \
21200+ "1: rep; "__copyuser_seg"movsb\n" \
21201 "2:\n" \
21202 ".section .fixup,\"ax\"\n" \
21203 "5: addl %3,%0\n" \
21204@@ -775,9 +892,9 @@ survive:
21205 }
21206 #endif
21207 if (movsl_is_ok(to, from, n))
21208- __copy_user(to, from, n);
21209+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21210 else
21211- n = __copy_user_intel(to, from, n);
21212+ n = __generic_copy_to_user_intel(to, from, n);
21213 return n;
21214 }
21215 EXPORT_SYMBOL(__copy_to_user_ll);
21216@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21217 unsigned long n)
21218 {
21219 if (movsl_is_ok(to, from, n))
21220- __copy_user(to, from, n);
21221+ __copy_user(to, from, n, __copyuser_seg, "", "");
21222 else
21223- n = __copy_user_intel((void __user *)to,
21224- (const void *)from, n);
21225+ n = __generic_copy_from_user_intel(to, from, n);
21226 return n;
21227 }
21228 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21229@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21230 if (n > 64 && cpu_has_xmm2)
21231 n = __copy_user_intel_nocache(to, from, n);
21232 else
21233- __copy_user(to, from, n);
21234+ __copy_user(to, from, n, __copyuser_seg, "", "");
21235 #else
21236- __copy_user(to, from, n);
21237+ __copy_user(to, from, n, __copyuser_seg, "", "");
21238 #endif
21239 return n;
21240 }
21241 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21242
21243-/**
21244- * copy_to_user: - Copy a block of data into user space.
21245- * @to: Destination address, in user space.
21246- * @from: Source address, in kernel space.
21247- * @n: Number of bytes to copy.
21248- *
21249- * Context: User context only. This function may sleep.
21250- *
21251- * Copy data from kernel space to user space.
21252- *
21253- * Returns number of bytes that could not be copied.
21254- * On success, this will be zero.
21255- */
21256-unsigned long
21257-copy_to_user(void __user *to, const void *from, unsigned long n)
21258-{
21259- if (access_ok(VERIFY_WRITE, to, n))
21260- n = __copy_to_user(to, from, n);
21261- return n;
21262-}
21263-EXPORT_SYMBOL(copy_to_user);
21264-
21265-/**
21266- * copy_from_user: - Copy a block of data from user space.
21267- * @to: Destination address, in kernel space.
21268- * @from: Source address, in user space.
21269- * @n: Number of bytes to copy.
21270- *
21271- * Context: User context only. This function may sleep.
21272- *
21273- * Copy data from user space to kernel space.
21274- *
21275- * Returns number of bytes that could not be copied.
21276- * On success, this will be zero.
21277- *
21278- * If some data could not be copied, this function will pad the copied
21279- * data to the requested size using zero bytes.
21280- */
21281-unsigned long
21282-_copy_from_user(void *to, const void __user *from, unsigned long n)
21283-{
21284- if (access_ok(VERIFY_READ, from, n))
21285- n = __copy_from_user(to, from, n);
21286- else
21287- memset(to, 0, n);
21288- return n;
21289-}
21290-EXPORT_SYMBOL(_copy_from_user);
21291-
21292 void copy_from_user_overflow(void)
21293 {
21294 WARN(1, "Buffer overflow detected!\n");
21295 }
21296 EXPORT_SYMBOL(copy_from_user_overflow);
21297+
21298+void copy_to_user_overflow(void)
21299+{
21300+ WARN(1, "Buffer overflow detected!\n");
21301+}
21302+EXPORT_SYMBOL(copy_to_user_overflow);
21303+
21304+#ifdef CONFIG_PAX_MEMORY_UDEREF
21305+void __set_fs(mm_segment_t x)
21306+{
21307+ switch (x.seg) {
21308+ case 0:
21309+ loadsegment(gs, 0);
21310+ break;
21311+ case TASK_SIZE_MAX:
21312+ loadsegment(gs, __USER_DS);
21313+ break;
21314+ case -1UL:
21315+ loadsegment(gs, __KERNEL_DS);
21316+ break;
21317+ default:
21318+ BUG();
21319+ }
21320+ return;
21321+}
21322+EXPORT_SYMBOL(__set_fs);
21323+
21324+void set_fs(mm_segment_t x)
21325+{
21326+ current_thread_info()->addr_limit = x;
21327+ __set_fs(x);
21328+}
21329+EXPORT_SYMBOL(set_fs);
21330+#endif
21331diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21332index b7c2849..8633ad8 100644
21333--- a/arch/x86/lib/usercopy_64.c
21334+++ b/arch/x86/lib/usercopy_64.c
21335@@ -42,6 +42,12 @@ long
21336 __strncpy_from_user(char *dst, const char __user *src, long count)
21337 {
21338 long res;
21339+
21340+#ifdef CONFIG_PAX_MEMORY_UDEREF
21341+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21342+ src += PAX_USER_SHADOW_BASE;
21343+#endif
21344+
21345 __do_strncpy_from_user(dst, src, count, res);
21346 return res;
21347 }
21348@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21349 {
21350 long __d0;
21351 might_fault();
21352+
21353+#ifdef CONFIG_PAX_MEMORY_UDEREF
21354+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21355+ addr += PAX_USER_SHADOW_BASE;
21356+#endif
21357+
21358 /* no memory constraint because it doesn't change any memory gcc knows
21359 about */
21360 asm volatile(
21361@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21362 }
21363 EXPORT_SYMBOL(strlen_user);
21364
21365-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21366+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21367 {
21368- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21369- return copy_user_generic((__force void *)to, (__force void *)from, len);
21370- }
21371- return len;
21372+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21373+
21374+#ifdef CONFIG_PAX_MEMORY_UDEREF
21375+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21376+ to += PAX_USER_SHADOW_BASE;
21377+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21378+ from += PAX_USER_SHADOW_BASE;
21379+#endif
21380+
21381+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21382+ }
21383+ return len;
21384 }
21385 EXPORT_SYMBOL(copy_in_user);
21386
21387@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21388 * it is not necessary to optimize tail handling.
21389 */
21390 unsigned long
21391-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21392+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21393 {
21394 char c;
21395 unsigned zero_len;
21396diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21397index d0474ad..36e9257 100644
21398--- a/arch/x86/mm/extable.c
21399+++ b/arch/x86/mm/extable.c
21400@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21401 const struct exception_table_entry *fixup;
21402
21403 #ifdef CONFIG_PNPBIOS
21404- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21405+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21406 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21407 extern u32 pnp_bios_is_utter_crap;
21408 pnp_bios_is_utter_crap = 1;
21409diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21410index 5db0490..13bd09c 100644
21411--- a/arch/x86/mm/fault.c
21412+++ b/arch/x86/mm/fault.c
21413@@ -13,11 +13,18 @@
21414 #include <linux/perf_event.h> /* perf_sw_event */
21415 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21416 #include <linux/prefetch.h> /* prefetchw */
21417+#include <linux/unistd.h>
21418+#include <linux/compiler.h>
21419
21420 #include <asm/traps.h> /* dotraplinkage, ... */
21421 #include <asm/pgalloc.h> /* pgd_*(), ... */
21422 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21423 #include <asm/fixmap.h> /* VSYSCALL_START */
21424+#include <asm/tlbflush.h>
21425+
21426+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21427+#include <asm/stacktrace.h>
21428+#endif
21429
21430 /*
21431 * Page fault error code bits:
21432@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21433 int ret = 0;
21434
21435 /* kprobe_running() needs smp_processor_id() */
21436- if (kprobes_built_in() && !user_mode_vm(regs)) {
21437+ if (kprobes_built_in() && !user_mode(regs)) {
21438 preempt_disable();
21439 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21440 ret = 1;
21441@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21442 return !instr_lo || (instr_lo>>1) == 1;
21443 case 0x00:
21444 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21445- if (probe_kernel_address(instr, opcode))
21446+ if (user_mode(regs)) {
21447+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21448+ return 0;
21449+ } else if (probe_kernel_address(instr, opcode))
21450 return 0;
21451
21452 *prefetch = (instr_lo == 0xF) &&
21453@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21454 while (instr < max_instr) {
21455 unsigned char opcode;
21456
21457- if (probe_kernel_address(instr, opcode))
21458+ if (user_mode(regs)) {
21459+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21460+ break;
21461+ } else if (probe_kernel_address(instr, opcode))
21462 break;
21463
21464 instr++;
21465@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21466 force_sig_info(si_signo, &info, tsk);
21467 }
21468
21469+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21470+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21471+#endif
21472+
21473+#ifdef CONFIG_PAX_EMUTRAMP
21474+static int pax_handle_fetch_fault(struct pt_regs *regs);
21475+#endif
21476+
21477+#ifdef CONFIG_PAX_PAGEEXEC
21478+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21479+{
21480+ pgd_t *pgd;
21481+ pud_t *pud;
21482+ pmd_t *pmd;
21483+
21484+ pgd = pgd_offset(mm, address);
21485+ if (!pgd_present(*pgd))
21486+ return NULL;
21487+ pud = pud_offset(pgd, address);
21488+ if (!pud_present(*pud))
21489+ return NULL;
21490+ pmd = pmd_offset(pud, address);
21491+ if (!pmd_present(*pmd))
21492+ return NULL;
21493+ return pmd;
21494+}
21495+#endif
21496+
21497 DEFINE_SPINLOCK(pgd_lock);
21498 LIST_HEAD(pgd_list);
21499
21500@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21501 for (address = VMALLOC_START & PMD_MASK;
21502 address >= TASK_SIZE && address < FIXADDR_TOP;
21503 address += PMD_SIZE) {
21504+
21505+#ifdef CONFIG_PAX_PER_CPU_PGD
21506+ unsigned long cpu;
21507+#else
21508 struct page *page;
21509+#endif
21510
21511 spin_lock(&pgd_lock);
21512+
21513+#ifdef CONFIG_PAX_PER_CPU_PGD
21514+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21515+ pgd_t *pgd = get_cpu_pgd(cpu);
21516+ pmd_t *ret;
21517+#else
21518 list_for_each_entry(page, &pgd_list, lru) {
21519+ pgd_t *pgd = page_address(page);
21520 spinlock_t *pgt_lock;
21521 pmd_t *ret;
21522
21523@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21524 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21525
21526 spin_lock(pgt_lock);
21527- ret = vmalloc_sync_one(page_address(page), address);
21528+#endif
21529+
21530+ ret = vmalloc_sync_one(pgd, address);
21531+
21532+#ifndef CONFIG_PAX_PER_CPU_PGD
21533 spin_unlock(pgt_lock);
21534+#endif
21535
21536 if (!ret)
21537 break;
21538@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21539 * an interrupt in the middle of a task switch..
21540 */
21541 pgd_paddr = read_cr3();
21542+
21543+#ifdef CONFIG_PAX_PER_CPU_PGD
21544+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21545+#endif
21546+
21547 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21548 if (!pmd_k)
21549 return -1;
21550@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21551 * happen within a race in page table update. In the later
21552 * case just flush:
21553 */
21554+
21555+#ifdef CONFIG_PAX_PER_CPU_PGD
21556+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21557+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21558+#else
21559 pgd = pgd_offset(current->active_mm, address);
21560+#endif
21561+
21562 pgd_ref = pgd_offset_k(address);
21563 if (pgd_none(*pgd_ref))
21564 return -1;
21565@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21566 static int is_errata100(struct pt_regs *regs, unsigned long address)
21567 {
21568 #ifdef CONFIG_X86_64
21569- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21570+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21571 return 1;
21572 #endif
21573 return 0;
21574@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21575 }
21576
21577 static const char nx_warning[] = KERN_CRIT
21578-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21579+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21580
21581 static void
21582 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21583@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21584 if (!oops_may_print())
21585 return;
21586
21587- if (error_code & PF_INSTR) {
21588+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21589 unsigned int level;
21590
21591 pte_t *pte = lookup_address(address, &level);
21592
21593 if (pte && pte_present(*pte) && !pte_exec(*pte))
21594- printk(nx_warning, current_uid());
21595+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21596 }
21597
21598+#ifdef CONFIG_PAX_KERNEXEC
21599+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21600+ if (current->signal->curr_ip)
21601+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21602+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21603+ else
21604+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21605+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21606+ }
21607+#endif
21608+
21609 printk(KERN_ALERT "BUG: unable to handle kernel ");
21610 if (address < PAGE_SIZE)
21611 printk(KERN_CONT "NULL pointer dereference");
21612@@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21613 }
21614 #endif
21615
21616+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21617+ if (pax_is_fetch_fault(regs, error_code, address)) {
21618+
21619+#ifdef CONFIG_PAX_EMUTRAMP
21620+ switch (pax_handle_fetch_fault(regs)) {
21621+ case 2:
21622+ return;
21623+ }
21624+#endif
21625+
21626+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21627+ do_group_exit(SIGKILL);
21628+ }
21629+#endif
21630+
21631 if (unlikely(show_unhandled_signals))
21632 show_signal_msg(regs, error_code, address, tsk);
21633
21634@@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21635 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21636 printk(KERN_ERR
21637 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21638- tsk->comm, tsk->pid, address);
21639+ tsk->comm, task_pid_nr(tsk), address);
21640 code = BUS_MCEERR_AR;
21641 }
21642 #endif
21643@@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21644 return 1;
21645 }
21646
21647+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21648+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21649+{
21650+ pte_t *pte;
21651+ pmd_t *pmd;
21652+ spinlock_t *ptl;
21653+ unsigned char pte_mask;
21654+
21655+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21656+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21657+ return 0;
21658+
21659+ /* PaX: it's our fault, let's handle it if we can */
21660+
21661+ /* PaX: take a look at read faults before acquiring any locks */
21662+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21663+ /* instruction fetch attempt from a protected page in user mode */
21664+ up_read(&mm->mmap_sem);
21665+
21666+#ifdef CONFIG_PAX_EMUTRAMP
21667+ switch (pax_handle_fetch_fault(regs)) {
21668+ case 2:
21669+ return 1;
21670+ }
21671+#endif
21672+
21673+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21674+ do_group_exit(SIGKILL);
21675+ }
21676+
21677+ pmd = pax_get_pmd(mm, address);
21678+ if (unlikely(!pmd))
21679+ return 0;
21680+
21681+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21682+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21683+ pte_unmap_unlock(pte, ptl);
21684+ return 0;
21685+ }
21686+
21687+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21688+ /* write attempt to a protected page in user mode */
21689+ pte_unmap_unlock(pte, ptl);
21690+ return 0;
21691+ }
21692+
21693+#ifdef CONFIG_SMP
21694+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21695+#else
21696+ if (likely(address > get_limit(regs->cs)))
21697+#endif
21698+ {
21699+ set_pte(pte, pte_mkread(*pte));
21700+ __flush_tlb_one(address);
21701+ pte_unmap_unlock(pte, ptl);
21702+ up_read(&mm->mmap_sem);
21703+ return 1;
21704+ }
21705+
21706+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21707+
21708+ /*
21709+ * PaX: fill DTLB with user rights and retry
21710+ */
21711+ __asm__ __volatile__ (
21712+ "orb %2,(%1)\n"
21713+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21714+/*
21715+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21716+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21717+ * page fault when examined during a TLB load attempt. this is true not only
21718+ * for PTEs holding a non-present entry but also present entries that will
21719+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21720+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21721+ * for our target pages since their PTEs are simply not in the TLBs at all.
21722+
21723+ * the best thing in omitting it is that we gain around 15-20% speed in the
21724+ * fast path of the page fault handler and can get rid of tracing since we
21725+ * can no longer flush unintended entries.
21726+ */
21727+ "invlpg (%0)\n"
21728+#endif
21729+ __copyuser_seg"testb $0,(%0)\n"
21730+ "xorb %3,(%1)\n"
21731+ :
21732+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21733+ : "memory", "cc");
21734+ pte_unmap_unlock(pte, ptl);
21735+ up_read(&mm->mmap_sem);
21736+ return 1;
21737+}
21738+#endif
21739+
21740 /*
21741 * Handle a spurious fault caused by a stale TLB entry.
21742 *
21743@@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21744 static inline int
21745 access_error(unsigned long error_code, struct vm_area_struct *vma)
21746 {
21747+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21748+ return 1;
21749+
21750 if (error_code & PF_WRITE) {
21751 /* write, present and write, not present: */
21752 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21753@@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21754 {
21755 struct vm_area_struct *vma;
21756 struct task_struct *tsk;
21757- unsigned long address;
21758 struct mm_struct *mm;
21759 int fault;
21760 int write = error_code & PF_WRITE;
21761 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21762 (write ? FAULT_FLAG_WRITE : 0);
21763
21764- tsk = current;
21765- mm = tsk->mm;
21766-
21767 /* Get the faulting address: */
21768- address = read_cr2();
21769+ unsigned long address = read_cr2();
21770+
21771+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21772+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21773+ if (!search_exception_tables(regs->ip)) {
21774+ bad_area_nosemaphore(regs, error_code, address);
21775+ return;
21776+ }
21777+ if (address < PAX_USER_SHADOW_BASE) {
21778+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21779+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21780+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21781+ } else
21782+ address -= PAX_USER_SHADOW_BASE;
21783+ }
21784+#endif
21785+
21786+ tsk = current;
21787+ mm = tsk->mm;
21788
21789 /*
21790 * Detect and handle instructions that would cause a page fault for
21791@@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21792 * User-mode registers count as a user access even for any
21793 * potential system fault or CPU buglet:
21794 */
21795- if (user_mode_vm(regs)) {
21796+ if (user_mode(regs)) {
21797 local_irq_enable();
21798 error_code |= PF_USER;
21799 } else {
21800@@ -1122,6 +1328,11 @@ retry:
21801 might_sleep();
21802 }
21803
21804+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21805+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21806+ return;
21807+#endif
21808+
21809 vma = find_vma(mm, address);
21810 if (unlikely(!vma)) {
21811 bad_area(regs, error_code, address);
21812@@ -1133,18 +1344,24 @@ retry:
21813 bad_area(regs, error_code, address);
21814 return;
21815 }
21816- if (error_code & PF_USER) {
21817- /*
21818- * Accessing the stack below %sp is always a bug.
21819- * The large cushion allows instructions like enter
21820- * and pusha to work. ("enter $65535, $31" pushes
21821- * 32 pointers and then decrements %sp by 65535.)
21822- */
21823- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21824- bad_area(regs, error_code, address);
21825- return;
21826- }
21827+ /*
21828+ * Accessing the stack below %sp is always a bug.
21829+ * The large cushion allows instructions like enter
21830+ * and pusha to work. ("enter $65535, $31" pushes
21831+ * 32 pointers and then decrements %sp by 65535.)
21832+ */
21833+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21834+ bad_area(regs, error_code, address);
21835+ return;
21836 }
21837+
21838+#ifdef CONFIG_PAX_SEGMEXEC
21839+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21840+ bad_area(regs, error_code, address);
21841+ return;
21842+ }
21843+#endif
21844+
21845 if (unlikely(expand_stack(vma, address))) {
21846 bad_area(regs, error_code, address);
21847 return;
21848@@ -1199,3 +1416,292 @@ good_area:
21849
21850 up_read(&mm->mmap_sem);
21851 }
21852+
21853+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21854+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21855+{
21856+ struct mm_struct *mm = current->mm;
21857+ unsigned long ip = regs->ip;
21858+
21859+ if (v8086_mode(regs))
21860+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21861+
21862+#ifdef CONFIG_PAX_PAGEEXEC
21863+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21864+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21865+ return true;
21866+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21867+ return true;
21868+ return false;
21869+ }
21870+#endif
21871+
21872+#ifdef CONFIG_PAX_SEGMEXEC
21873+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21874+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21875+ return true;
21876+ return false;
21877+ }
21878+#endif
21879+
21880+ return false;
21881+}
21882+#endif
21883+
21884+#ifdef CONFIG_PAX_EMUTRAMP
21885+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21886+{
21887+ int err;
21888+
21889+ do { /* PaX: libffi trampoline emulation */
21890+ unsigned char mov, jmp;
21891+ unsigned int addr1, addr2;
21892+
21893+#ifdef CONFIG_X86_64
21894+ if ((regs->ip + 9) >> 32)
21895+ break;
21896+#endif
21897+
21898+ err = get_user(mov, (unsigned char __user *)regs->ip);
21899+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21900+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21901+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21902+
21903+ if (err)
21904+ break;
21905+
21906+ if (mov == 0xB8 && jmp == 0xE9) {
21907+ regs->ax = addr1;
21908+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21909+ return 2;
21910+ }
21911+ } while (0);
21912+
21913+ do { /* PaX: gcc trampoline emulation #1 */
21914+ unsigned char mov1, mov2;
21915+ unsigned short jmp;
21916+ unsigned int addr1, addr2;
21917+
21918+#ifdef CONFIG_X86_64
21919+ if ((regs->ip + 11) >> 32)
21920+ break;
21921+#endif
21922+
21923+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21924+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21925+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21926+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21927+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21928+
21929+ if (err)
21930+ break;
21931+
21932+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21933+ regs->cx = addr1;
21934+ regs->ax = addr2;
21935+ regs->ip = addr2;
21936+ return 2;
21937+ }
21938+ } while (0);
21939+
21940+ do { /* PaX: gcc trampoline emulation #2 */
21941+ unsigned char mov, jmp;
21942+ unsigned int addr1, addr2;
21943+
21944+#ifdef CONFIG_X86_64
21945+ if ((regs->ip + 9) >> 32)
21946+ break;
21947+#endif
21948+
21949+ err = get_user(mov, (unsigned char __user *)regs->ip);
21950+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21951+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21952+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21953+
21954+ if (err)
21955+ break;
21956+
21957+ if (mov == 0xB9 && jmp == 0xE9) {
21958+ regs->cx = addr1;
21959+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21960+ return 2;
21961+ }
21962+ } while (0);
21963+
21964+ return 1; /* PaX in action */
21965+}
21966+
21967+#ifdef CONFIG_X86_64
21968+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21969+{
21970+ int err;
21971+
21972+ do { /* PaX: libffi trampoline emulation */
21973+ unsigned short mov1, mov2, jmp1;
21974+ unsigned char stcclc, jmp2;
21975+ unsigned long addr1, addr2;
21976+
21977+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21978+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21979+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21980+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21981+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
21982+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
21983+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
21984+
21985+ if (err)
21986+ break;
21987+
21988+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21989+ regs->r11 = addr1;
21990+ regs->r10 = addr2;
21991+ if (stcclc == 0xF8)
21992+ regs->flags &= ~X86_EFLAGS_CF;
21993+ else
21994+ regs->flags |= X86_EFLAGS_CF;
21995+ regs->ip = addr1;
21996+ return 2;
21997+ }
21998+ } while (0);
21999+
22000+ do { /* PaX: gcc trampoline emulation #1 */
22001+ unsigned short mov1, mov2, jmp1;
22002+ unsigned char jmp2;
22003+ unsigned int addr1;
22004+ unsigned long addr2;
22005+
22006+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22007+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22008+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22009+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22010+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22011+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22012+
22013+ if (err)
22014+ break;
22015+
22016+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22017+ regs->r11 = addr1;
22018+ regs->r10 = addr2;
22019+ regs->ip = addr1;
22020+ return 2;
22021+ }
22022+ } while (0);
22023+
22024+ do { /* PaX: gcc trampoline emulation #2 */
22025+ unsigned short mov1, mov2, jmp1;
22026+ unsigned char jmp2;
22027+ unsigned long addr1, addr2;
22028+
22029+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22030+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22031+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22032+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22033+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22034+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22035+
22036+ if (err)
22037+ break;
22038+
22039+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22040+ regs->r11 = addr1;
22041+ regs->r10 = addr2;
22042+ regs->ip = addr1;
22043+ return 2;
22044+ }
22045+ } while (0);
22046+
22047+ return 1; /* PaX in action */
22048+}
22049+#endif
22050+
22051+/*
22052+ * PaX: decide what to do with offenders (regs->ip = fault address)
22053+ *
22054+ * returns 1 when task should be killed
22055+ * 2 when gcc trampoline was detected
22056+ */
22057+static int pax_handle_fetch_fault(struct pt_regs *regs)
22058+{
22059+ if (v8086_mode(regs))
22060+ return 1;
22061+
22062+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22063+ return 1;
22064+
22065+#ifdef CONFIG_X86_32
22066+ return pax_handle_fetch_fault_32(regs);
22067+#else
22068+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22069+ return pax_handle_fetch_fault_32(regs);
22070+ else
22071+ return pax_handle_fetch_fault_64(regs);
22072+#endif
22073+}
22074+#endif
22075+
22076+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22077+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22078+{
22079+ long i;
22080+
22081+ printk(KERN_ERR "PAX: bytes at PC: ");
22082+ for (i = 0; i < 20; i++) {
22083+ unsigned char c;
22084+ if (get_user(c, (unsigned char __force_user *)pc+i))
22085+ printk(KERN_CONT "?? ");
22086+ else
22087+ printk(KERN_CONT "%02x ", c);
22088+ }
22089+ printk("\n");
22090+
22091+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22092+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22093+ unsigned long c;
22094+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
22095+#ifdef CONFIG_X86_32
22096+ printk(KERN_CONT "???????? ");
22097+#else
22098+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22099+ printk(KERN_CONT "???????? ???????? ");
22100+ else
22101+ printk(KERN_CONT "???????????????? ");
22102+#endif
22103+ } else {
22104+#ifdef CONFIG_X86_64
22105+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22106+ printk(KERN_CONT "%08x ", (unsigned int)c);
22107+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22108+ } else
22109+#endif
22110+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22111+ }
22112+ }
22113+ printk("\n");
22114+}
22115+#endif
22116+
22117+/**
22118+ * probe_kernel_write(): safely attempt to write to a location
22119+ * @dst: address to write to
22120+ * @src: pointer to the data that shall be written
22121+ * @size: size of the data chunk
22122+ *
22123+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22124+ * happens, handle that and return -EFAULT.
22125+ */
22126+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22127+{
22128+ long ret;
22129+ mm_segment_t old_fs = get_fs();
22130+
22131+ set_fs(KERNEL_DS);
22132+ pagefault_disable();
22133+ pax_open_kernel();
22134+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22135+ pax_close_kernel();
22136+ pagefault_enable();
22137+ set_fs(old_fs);
22138+
22139+ return ret ? -EFAULT : 0;
22140+}
22141diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22142index dd74e46..7d26398 100644
22143--- a/arch/x86/mm/gup.c
22144+++ b/arch/x86/mm/gup.c
22145@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22146 addr = start;
22147 len = (unsigned long) nr_pages << PAGE_SHIFT;
22148 end = start + len;
22149- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22150+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22151 (void __user *)start, len)))
22152 return 0;
22153
22154diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22155index f4f29b1..5cac4fb 100644
22156--- a/arch/x86/mm/highmem_32.c
22157+++ b/arch/x86/mm/highmem_32.c
22158@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22159 idx = type + KM_TYPE_NR*smp_processor_id();
22160 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22161 BUG_ON(!pte_none(*(kmap_pte-idx)));
22162+
22163+ pax_open_kernel();
22164 set_pte(kmap_pte-idx, mk_pte(page, prot));
22165+ pax_close_kernel();
22166+
22167 arch_flush_lazy_mmu_mode();
22168
22169 return (void *)vaddr;
22170diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22171index f581a18..29efd37 100644
22172--- a/arch/x86/mm/hugetlbpage.c
22173+++ b/arch/x86/mm/hugetlbpage.c
22174@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22175 struct hstate *h = hstate_file(file);
22176 struct mm_struct *mm = current->mm;
22177 struct vm_area_struct *vma;
22178- unsigned long start_addr;
22179+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22180+
22181+#ifdef CONFIG_PAX_SEGMEXEC
22182+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22183+ pax_task_size = SEGMEXEC_TASK_SIZE;
22184+#endif
22185+
22186+ pax_task_size -= PAGE_SIZE;
22187
22188 if (len > mm->cached_hole_size) {
22189- start_addr = mm->free_area_cache;
22190+ start_addr = mm->free_area_cache;
22191 } else {
22192- start_addr = TASK_UNMAPPED_BASE;
22193- mm->cached_hole_size = 0;
22194+ start_addr = mm->mmap_base;
22195+ mm->cached_hole_size = 0;
22196 }
22197
22198 full_search:
22199@@ -280,26 +287,27 @@ full_search:
22200
22201 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22202 /* At this point: (!vma || addr < vma->vm_end). */
22203- if (TASK_SIZE - len < addr) {
22204+ if (pax_task_size - len < addr) {
22205 /*
22206 * Start a new search - just in case we missed
22207 * some holes.
22208 */
22209- if (start_addr != TASK_UNMAPPED_BASE) {
22210- start_addr = TASK_UNMAPPED_BASE;
22211+ if (start_addr != mm->mmap_base) {
22212+ start_addr = mm->mmap_base;
22213 mm->cached_hole_size = 0;
22214 goto full_search;
22215 }
22216 return -ENOMEM;
22217 }
22218- if (!vma || addr + len <= vma->vm_start) {
22219- mm->free_area_cache = addr + len;
22220- return addr;
22221- }
22222+ if (check_heap_stack_gap(vma, addr, len))
22223+ break;
22224 if (addr + mm->cached_hole_size < vma->vm_start)
22225 mm->cached_hole_size = vma->vm_start - addr;
22226 addr = ALIGN(vma->vm_end, huge_page_size(h));
22227 }
22228+
22229+ mm->free_area_cache = addr + len;
22230+ return addr;
22231 }
22232
22233 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22234@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22235 {
22236 struct hstate *h = hstate_file(file);
22237 struct mm_struct *mm = current->mm;
22238- struct vm_area_struct *vma, *prev_vma;
22239- unsigned long base = mm->mmap_base, addr = addr0;
22240+ struct vm_area_struct *vma;
22241+ unsigned long base = mm->mmap_base, addr;
22242 unsigned long largest_hole = mm->cached_hole_size;
22243- int first_time = 1;
22244
22245 /* don't allow allocations above current base */
22246 if (mm->free_area_cache > base)
22247@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22248 largest_hole = 0;
22249 mm->free_area_cache = base;
22250 }
22251-try_again:
22252+
22253 /* make sure it can fit in the remaining address space */
22254 if (mm->free_area_cache < len)
22255 goto fail;
22256
22257 /* either no address requested or can't fit in requested address hole */
22258- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22259+ addr = (mm->free_area_cache - len);
22260 do {
22261+ addr &= huge_page_mask(h);
22262+ vma = find_vma(mm, addr);
22263 /*
22264 * Lookup failure means no vma is above this address,
22265 * i.e. return with success:
22266- */
22267- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22268- return addr;
22269-
22270- /*
22271 * new region fits between prev_vma->vm_end and
22272 * vma->vm_start, use it:
22273 */
22274- if (addr + len <= vma->vm_start &&
22275- (!prev_vma || (addr >= prev_vma->vm_end))) {
22276+ if (check_heap_stack_gap(vma, addr, len)) {
22277 /* remember the address as a hint for next time */
22278- mm->cached_hole_size = largest_hole;
22279- return (mm->free_area_cache = addr);
22280- } else {
22281- /* pull free_area_cache down to the first hole */
22282- if (mm->free_area_cache == vma->vm_end) {
22283- mm->free_area_cache = vma->vm_start;
22284- mm->cached_hole_size = largest_hole;
22285- }
22286+ mm->cached_hole_size = largest_hole;
22287+ return (mm->free_area_cache = addr);
22288+ }
22289+ /* pull free_area_cache down to the first hole */
22290+ if (mm->free_area_cache == vma->vm_end) {
22291+ mm->free_area_cache = vma->vm_start;
22292+ mm->cached_hole_size = largest_hole;
22293 }
22294
22295 /* remember the largest hole we saw so far */
22296 if (addr + largest_hole < vma->vm_start)
22297- largest_hole = vma->vm_start - addr;
22298+ largest_hole = vma->vm_start - addr;
22299
22300 /* try just below the current vma->vm_start */
22301- addr = (vma->vm_start - len) & huge_page_mask(h);
22302- } while (len <= vma->vm_start);
22303+ addr = skip_heap_stack_gap(vma, len);
22304+ } while (!IS_ERR_VALUE(addr));
22305
22306 fail:
22307 /*
22308- * if hint left us with no space for the requested
22309- * mapping then try again:
22310- */
22311- if (first_time) {
22312- mm->free_area_cache = base;
22313- largest_hole = 0;
22314- first_time = 0;
22315- goto try_again;
22316- }
22317- /*
22318 * A failed mmap() very likely causes application failure,
22319 * so fall back to the bottom-up function here. This scenario
22320 * can happen with large stack limits and large mmap()
22321 * allocations.
22322 */
22323- mm->free_area_cache = TASK_UNMAPPED_BASE;
22324+
22325+#ifdef CONFIG_PAX_SEGMEXEC
22326+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22327+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22328+ else
22329+#endif
22330+
22331+ mm->mmap_base = TASK_UNMAPPED_BASE;
22332+
22333+#ifdef CONFIG_PAX_RANDMMAP
22334+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22335+ mm->mmap_base += mm->delta_mmap;
22336+#endif
22337+
22338+ mm->free_area_cache = mm->mmap_base;
22339 mm->cached_hole_size = ~0UL;
22340 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22341 len, pgoff, flags);
22342@@ -386,6 +392,7 @@ fail:
22343 /*
22344 * Restore the topdown base:
22345 */
22346+ mm->mmap_base = base;
22347 mm->free_area_cache = base;
22348 mm->cached_hole_size = ~0UL;
22349
22350@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22351 struct hstate *h = hstate_file(file);
22352 struct mm_struct *mm = current->mm;
22353 struct vm_area_struct *vma;
22354+ unsigned long pax_task_size = TASK_SIZE;
22355
22356 if (len & ~huge_page_mask(h))
22357 return -EINVAL;
22358- if (len > TASK_SIZE)
22359+
22360+#ifdef CONFIG_PAX_SEGMEXEC
22361+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22362+ pax_task_size = SEGMEXEC_TASK_SIZE;
22363+#endif
22364+
22365+ pax_task_size -= PAGE_SIZE;
22366+
22367+ if (len > pax_task_size)
22368 return -ENOMEM;
22369
22370 if (flags & MAP_FIXED) {
22371@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22372 if (addr) {
22373 addr = ALIGN(addr, huge_page_size(h));
22374 vma = find_vma(mm, addr);
22375- if (TASK_SIZE - len >= addr &&
22376- (!vma || addr + len <= vma->vm_start))
22377+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22378 return addr;
22379 }
22380 if (mm->get_unmapped_area == arch_get_unmapped_area)
22381diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22382index 87488b9..a55509f 100644
22383--- a/arch/x86/mm/init.c
22384+++ b/arch/x86/mm/init.c
22385@@ -15,6 +15,7 @@
22386 #include <asm/tlbflush.h>
22387 #include <asm/tlb.h>
22388 #include <asm/proto.h>
22389+#include <asm/desc.h>
22390
22391 unsigned long __initdata pgt_buf_start;
22392 unsigned long __meminitdata pgt_buf_end;
22393@@ -31,7 +32,7 @@ int direct_gbpages
22394 static void __init find_early_table_space(unsigned long end, int use_pse,
22395 int use_gbpages)
22396 {
22397- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22398+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22399 phys_addr_t base;
22400
22401 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22402@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22403 */
22404 int devmem_is_allowed(unsigned long pagenr)
22405 {
22406+#ifdef CONFIG_GRKERNSEC_KMEM
22407+ /* allow BDA */
22408+ if (!pagenr)
22409+ return 1;
22410+ /* allow EBDA */
22411+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22412+ return 1;
22413+#else
22414+ if (!pagenr)
22415+ return 1;
22416+#ifdef CONFIG_VM86
22417+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22418+ return 1;
22419+#endif
22420+#endif
22421+
22422+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22423+ return 1;
22424+#ifdef CONFIG_GRKERNSEC_KMEM
22425+ /* throw out everything else below 1MB */
22426 if (pagenr <= 256)
22427- return 1;
22428+ return 0;
22429+#endif
22430 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22431 return 0;
22432 if (!page_is_ram(pagenr))
22433@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22434
22435 void free_initmem(void)
22436 {
22437+
22438+#ifdef CONFIG_PAX_KERNEXEC
22439+#ifdef CONFIG_X86_32
22440+ /* PaX: limit KERNEL_CS to actual size */
22441+ unsigned long addr, limit;
22442+ struct desc_struct d;
22443+ int cpu;
22444+
22445+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22446+ limit = (limit - 1UL) >> PAGE_SHIFT;
22447+
22448+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22449+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22450+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22451+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22452+ }
22453+
22454+ /* PaX: make KERNEL_CS read-only */
22455+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22456+ if (!paravirt_enabled())
22457+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22458+/*
22459+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22460+ pgd = pgd_offset_k(addr);
22461+ pud = pud_offset(pgd, addr);
22462+ pmd = pmd_offset(pud, addr);
22463+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22464+ }
22465+*/
22466+#ifdef CONFIG_X86_PAE
22467+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22468+/*
22469+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22470+ pgd = pgd_offset_k(addr);
22471+ pud = pud_offset(pgd, addr);
22472+ pmd = pmd_offset(pud, addr);
22473+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22474+ }
22475+*/
22476+#endif
22477+
22478+#ifdef CONFIG_MODULES
22479+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22480+#endif
22481+
22482+#else
22483+ pgd_t *pgd;
22484+ pud_t *pud;
22485+ pmd_t *pmd;
22486+ unsigned long addr, end;
22487+
22488+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22489+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22490+ pgd = pgd_offset_k(addr);
22491+ pud = pud_offset(pgd, addr);
22492+ pmd = pmd_offset(pud, addr);
22493+ if (!pmd_present(*pmd))
22494+ continue;
22495+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22496+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22497+ else
22498+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22499+ }
22500+
22501+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22502+ end = addr + KERNEL_IMAGE_SIZE;
22503+ for (; addr < end; addr += PMD_SIZE) {
22504+ pgd = pgd_offset_k(addr);
22505+ pud = pud_offset(pgd, addr);
22506+ pmd = pmd_offset(pud, addr);
22507+ if (!pmd_present(*pmd))
22508+ continue;
22509+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22510+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22511+ }
22512+#endif
22513+
22514+ flush_tlb_all();
22515+#endif
22516+
22517 free_init_pages("unused kernel memory",
22518 (unsigned long)(&__init_begin),
22519 (unsigned long)(&__init_end));
22520diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22521index 29f7c6d..b46b35b 100644
22522--- a/arch/x86/mm/init_32.c
22523+++ b/arch/x86/mm/init_32.c
22524@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22525 }
22526
22527 /*
22528- * Creates a middle page table and puts a pointer to it in the
22529- * given global directory entry. This only returns the gd entry
22530- * in non-PAE compilation mode, since the middle layer is folded.
22531- */
22532-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22533-{
22534- pud_t *pud;
22535- pmd_t *pmd_table;
22536-
22537-#ifdef CONFIG_X86_PAE
22538- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22539- if (after_bootmem)
22540- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22541- else
22542- pmd_table = (pmd_t *)alloc_low_page();
22543- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22544- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22545- pud = pud_offset(pgd, 0);
22546- BUG_ON(pmd_table != pmd_offset(pud, 0));
22547-
22548- return pmd_table;
22549- }
22550-#endif
22551- pud = pud_offset(pgd, 0);
22552- pmd_table = pmd_offset(pud, 0);
22553-
22554- return pmd_table;
22555-}
22556-
22557-/*
22558 * Create a page table and place a pointer to it in a middle page
22559 * directory entry:
22560 */
22561@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22562 page_table = (pte_t *)alloc_low_page();
22563
22564 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22565+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22566+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22567+#else
22568 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22569+#endif
22570 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22571 }
22572
22573 return pte_offset_kernel(pmd, 0);
22574 }
22575
22576+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22577+{
22578+ pud_t *pud;
22579+ pmd_t *pmd_table;
22580+
22581+ pud = pud_offset(pgd, 0);
22582+ pmd_table = pmd_offset(pud, 0);
22583+
22584+ return pmd_table;
22585+}
22586+
22587 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22588 {
22589 int pgd_idx = pgd_index(vaddr);
22590@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22591 int pgd_idx, pmd_idx;
22592 unsigned long vaddr;
22593 pgd_t *pgd;
22594+ pud_t *pud;
22595 pmd_t *pmd;
22596 pte_t *pte = NULL;
22597
22598@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22599 pgd = pgd_base + pgd_idx;
22600
22601 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22602- pmd = one_md_table_init(pgd);
22603- pmd = pmd + pmd_index(vaddr);
22604+ pud = pud_offset(pgd, vaddr);
22605+ pmd = pmd_offset(pud, vaddr);
22606+
22607+#ifdef CONFIG_X86_PAE
22608+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22609+#endif
22610+
22611 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22612 pmd++, pmd_idx++) {
22613 pte = page_table_kmap_check(one_page_table_init(pmd),
22614@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22615 }
22616 }
22617
22618-static inline int is_kernel_text(unsigned long addr)
22619+static inline int is_kernel_text(unsigned long start, unsigned long end)
22620 {
22621- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22622- return 1;
22623- return 0;
22624+ if ((start > ktla_ktva((unsigned long)_etext) ||
22625+ end <= ktla_ktva((unsigned long)_stext)) &&
22626+ (start > ktla_ktva((unsigned long)_einittext) ||
22627+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22628+
22629+#ifdef CONFIG_ACPI_SLEEP
22630+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22631+#endif
22632+
22633+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22634+ return 0;
22635+ return 1;
22636 }
22637
22638 /*
22639@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22640 unsigned long last_map_addr = end;
22641 unsigned long start_pfn, end_pfn;
22642 pgd_t *pgd_base = swapper_pg_dir;
22643- int pgd_idx, pmd_idx, pte_ofs;
22644+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22645 unsigned long pfn;
22646 pgd_t *pgd;
22647+ pud_t *pud;
22648 pmd_t *pmd;
22649 pte_t *pte;
22650 unsigned pages_2m, pages_4k;
22651@@ -281,8 +282,13 @@ repeat:
22652 pfn = start_pfn;
22653 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22654 pgd = pgd_base + pgd_idx;
22655- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22656- pmd = one_md_table_init(pgd);
22657+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22658+ pud = pud_offset(pgd, 0);
22659+ pmd = pmd_offset(pud, 0);
22660+
22661+#ifdef CONFIG_X86_PAE
22662+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22663+#endif
22664
22665 if (pfn >= end_pfn)
22666 continue;
22667@@ -294,14 +300,13 @@ repeat:
22668 #endif
22669 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22670 pmd++, pmd_idx++) {
22671- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22672+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22673
22674 /*
22675 * Map with big pages if possible, otherwise
22676 * create normal page tables:
22677 */
22678 if (use_pse) {
22679- unsigned int addr2;
22680 pgprot_t prot = PAGE_KERNEL_LARGE;
22681 /*
22682 * first pass will use the same initial
22683@@ -311,11 +316,7 @@ repeat:
22684 __pgprot(PTE_IDENT_ATTR |
22685 _PAGE_PSE);
22686
22687- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22688- PAGE_OFFSET + PAGE_SIZE-1;
22689-
22690- if (is_kernel_text(addr) ||
22691- is_kernel_text(addr2))
22692+ if (is_kernel_text(address, address + PMD_SIZE))
22693 prot = PAGE_KERNEL_LARGE_EXEC;
22694
22695 pages_2m++;
22696@@ -332,7 +333,7 @@ repeat:
22697 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22698 pte += pte_ofs;
22699 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22700- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22701+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22702 pgprot_t prot = PAGE_KERNEL;
22703 /*
22704 * first pass will use the same initial
22705@@ -340,7 +341,7 @@ repeat:
22706 */
22707 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22708
22709- if (is_kernel_text(addr))
22710+ if (is_kernel_text(address, address + PAGE_SIZE))
22711 prot = PAGE_KERNEL_EXEC;
22712
22713 pages_4k++;
22714@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22715
22716 pud = pud_offset(pgd, va);
22717 pmd = pmd_offset(pud, va);
22718- if (!pmd_present(*pmd))
22719+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22720 break;
22721
22722 pte = pte_offset_kernel(pmd, va);
22723@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22724
22725 static void __init pagetable_init(void)
22726 {
22727- pgd_t *pgd_base = swapper_pg_dir;
22728-
22729- permanent_kmaps_init(pgd_base);
22730+ permanent_kmaps_init(swapper_pg_dir);
22731 }
22732
22733-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22734+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22735 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22736
22737 /* user-defined highmem size */
22738@@ -757,6 +756,12 @@ void __init mem_init(void)
22739
22740 pci_iommu_alloc();
22741
22742+#ifdef CONFIG_PAX_PER_CPU_PGD
22743+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22744+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22745+ KERNEL_PGD_PTRS);
22746+#endif
22747+
22748 #ifdef CONFIG_FLATMEM
22749 BUG_ON(!mem_map);
22750 #endif
22751@@ -774,7 +779,7 @@ void __init mem_init(void)
22752 set_highmem_pages_init();
22753
22754 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22755- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22756+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22757 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22758
22759 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22760@@ -815,10 +820,10 @@ void __init mem_init(void)
22761 ((unsigned long)&__init_end -
22762 (unsigned long)&__init_begin) >> 10,
22763
22764- (unsigned long)&_etext, (unsigned long)&_edata,
22765- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22766+ (unsigned long)&_sdata, (unsigned long)&_edata,
22767+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22768
22769- (unsigned long)&_text, (unsigned long)&_etext,
22770+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22771 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22772
22773 /*
22774@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22775 if (!kernel_set_to_readonly)
22776 return;
22777
22778+ start = ktla_ktva(start);
22779 pr_debug("Set kernel text: %lx - %lx for read write\n",
22780 start, start+size);
22781
22782@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22783 if (!kernel_set_to_readonly)
22784 return;
22785
22786+ start = ktla_ktva(start);
22787 pr_debug("Set kernel text: %lx - %lx for read only\n",
22788 start, start+size);
22789
22790@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22791 unsigned long start = PFN_ALIGN(_text);
22792 unsigned long size = PFN_ALIGN(_etext) - start;
22793
22794+ start = ktla_ktva(start);
22795 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22796 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22797 size >> 10);
22798diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22799index bbaaa00..16dffad 100644
22800--- a/arch/x86/mm/init_64.c
22801+++ b/arch/x86/mm/init_64.c
22802@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22803 * around without checking the pgd every time.
22804 */
22805
22806-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22807+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22808 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22809
22810 int force_personality32;
22811@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22812
22813 for (address = start; address <= end; address += PGDIR_SIZE) {
22814 const pgd_t *pgd_ref = pgd_offset_k(address);
22815+
22816+#ifdef CONFIG_PAX_PER_CPU_PGD
22817+ unsigned long cpu;
22818+#else
22819 struct page *page;
22820+#endif
22821
22822 if (pgd_none(*pgd_ref))
22823 continue;
22824
22825 spin_lock(&pgd_lock);
22826+
22827+#ifdef CONFIG_PAX_PER_CPU_PGD
22828+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22829+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
22830+#else
22831 list_for_each_entry(page, &pgd_list, lru) {
22832 pgd_t *pgd;
22833 spinlock_t *pgt_lock;
22834@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22835 /* the pgt_lock only for Xen */
22836 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22837 spin_lock(pgt_lock);
22838+#endif
22839
22840 if (pgd_none(*pgd))
22841 set_pgd(pgd, *pgd_ref);
22842@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22843 BUG_ON(pgd_page_vaddr(*pgd)
22844 != pgd_page_vaddr(*pgd_ref));
22845
22846+#ifndef CONFIG_PAX_PER_CPU_PGD
22847 spin_unlock(pgt_lock);
22848+#endif
22849+
22850 }
22851 spin_unlock(&pgd_lock);
22852 }
22853@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22854 pmd = fill_pmd(pud, vaddr);
22855 pte = fill_pte(pmd, vaddr);
22856
22857+ pax_open_kernel();
22858 set_pte(pte, new_pte);
22859+ pax_close_kernel();
22860
22861 /*
22862 * It's enough to flush this one mapping.
22863@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22864 pgd = pgd_offset_k((unsigned long)__va(phys));
22865 if (pgd_none(*pgd)) {
22866 pud = (pud_t *) spp_getpage();
22867- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22868- _PAGE_USER));
22869+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22870 }
22871 pud = pud_offset(pgd, (unsigned long)__va(phys));
22872 if (pud_none(*pud)) {
22873 pmd = (pmd_t *) spp_getpage();
22874- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22875- _PAGE_USER));
22876+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22877 }
22878 pmd = pmd_offset(pud, phys);
22879 BUG_ON(!pmd_none(*pmd));
22880@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22881 if (pfn >= pgt_buf_top)
22882 panic("alloc_low_page: ran out of memory");
22883
22884- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22885+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22886 clear_page(adr);
22887 *phys = pfn * PAGE_SIZE;
22888 return adr;
22889@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22890
22891 phys = __pa(virt);
22892 left = phys & (PAGE_SIZE - 1);
22893- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22894+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22895 adr = (void *)(((unsigned long)adr) | left);
22896
22897 return adr;
22898@@ -693,6 +707,12 @@ void __init mem_init(void)
22899
22900 pci_iommu_alloc();
22901
22902+#ifdef CONFIG_PAX_PER_CPU_PGD
22903+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22904+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22905+ KERNEL_PGD_PTRS);
22906+#endif
22907+
22908 /* clear_bss() already clear the empty_zero_page */
22909
22910 reservedpages = 0;
22911@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22912 static struct vm_area_struct gate_vma = {
22913 .vm_start = VSYSCALL_START,
22914 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22915- .vm_page_prot = PAGE_READONLY_EXEC,
22916- .vm_flags = VM_READ | VM_EXEC
22917+ .vm_page_prot = PAGE_READONLY,
22918+ .vm_flags = VM_READ
22919 };
22920
22921 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22922@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22923
22924 const char *arch_vma_name(struct vm_area_struct *vma)
22925 {
22926- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22927+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22928 return "[vdso]";
22929 if (vma == &gate_vma)
22930 return "[vsyscall]";
22931diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22932index 7b179b4..6bd1777 100644
22933--- a/arch/x86/mm/iomap_32.c
22934+++ b/arch/x86/mm/iomap_32.c
22935@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22936 type = kmap_atomic_idx_push();
22937 idx = type + KM_TYPE_NR * smp_processor_id();
22938 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22939+
22940+ pax_open_kernel();
22941 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22942+ pax_close_kernel();
22943+
22944 arch_flush_lazy_mmu_mode();
22945
22946 return (void *)vaddr;
22947diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22948index be1ef57..9680edc 100644
22949--- a/arch/x86/mm/ioremap.c
22950+++ b/arch/x86/mm/ioremap.c
22951@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22952 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22953 int is_ram = page_is_ram(pfn);
22954
22955- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22956+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22957 return NULL;
22958 WARN_ON_ONCE(is_ram);
22959 }
22960@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22961 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22962
22963 static __initdata int after_paging_init;
22964-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22965+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22966
22967 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22968 {
22969@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22970 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22971
22972 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22973- memset(bm_pte, 0, sizeof(bm_pte));
22974- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22975+ pmd_populate_user(&init_mm, pmd, bm_pte);
22976
22977 /*
22978 * The boot-ioremap range spans multiple pmds, for which
22979diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22980index d87dd6d..bf3fa66 100644
22981--- a/arch/x86/mm/kmemcheck/kmemcheck.c
22982+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
22983@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
22984 * memory (e.g. tracked pages)? For now, we need this to avoid
22985 * invoking kmemcheck for PnP BIOS calls.
22986 */
22987- if (regs->flags & X86_VM_MASK)
22988+ if (v8086_mode(regs))
22989 return false;
22990- if (regs->cs != __KERNEL_CS)
22991+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22992 return false;
22993
22994 pte = kmemcheck_pte_lookup(address);
22995diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
22996index 845df68..1d8d29f 100644
22997--- a/arch/x86/mm/mmap.c
22998+++ b/arch/x86/mm/mmap.c
22999@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23000 * Leave an at least ~128 MB hole with possible stack randomization.
23001 */
23002 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23003-#define MAX_GAP (TASK_SIZE/6*5)
23004+#define MAX_GAP (pax_task_size/6*5)
23005
23006 static int mmap_is_legacy(void)
23007 {
23008@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23009 return rnd << PAGE_SHIFT;
23010 }
23011
23012-static unsigned long mmap_base(void)
23013+static unsigned long mmap_base(struct mm_struct *mm)
23014 {
23015 unsigned long gap = rlimit(RLIMIT_STACK);
23016+ unsigned long pax_task_size = TASK_SIZE;
23017+
23018+#ifdef CONFIG_PAX_SEGMEXEC
23019+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23020+ pax_task_size = SEGMEXEC_TASK_SIZE;
23021+#endif
23022
23023 if (gap < MIN_GAP)
23024 gap = MIN_GAP;
23025 else if (gap > MAX_GAP)
23026 gap = MAX_GAP;
23027
23028- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23029+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23030 }
23031
23032 /*
23033 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23034 * does, but not when emulating X86_32
23035 */
23036-static unsigned long mmap_legacy_base(void)
23037+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23038 {
23039- if (mmap_is_ia32())
23040+ if (mmap_is_ia32()) {
23041+
23042+#ifdef CONFIG_PAX_SEGMEXEC
23043+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23044+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23045+ else
23046+#endif
23047+
23048 return TASK_UNMAPPED_BASE;
23049- else
23050+ } else
23051 return TASK_UNMAPPED_BASE + mmap_rnd();
23052 }
23053
23054@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23055 void arch_pick_mmap_layout(struct mm_struct *mm)
23056 {
23057 if (mmap_is_legacy()) {
23058- mm->mmap_base = mmap_legacy_base();
23059+ mm->mmap_base = mmap_legacy_base(mm);
23060+
23061+#ifdef CONFIG_PAX_RANDMMAP
23062+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23063+ mm->mmap_base += mm->delta_mmap;
23064+#endif
23065+
23066 mm->get_unmapped_area = arch_get_unmapped_area;
23067 mm->unmap_area = arch_unmap_area;
23068 } else {
23069- mm->mmap_base = mmap_base();
23070+ mm->mmap_base = mmap_base(mm);
23071+
23072+#ifdef CONFIG_PAX_RANDMMAP
23073+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23074+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23075+#endif
23076+
23077 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23078 mm->unmap_area = arch_unmap_area_topdown;
23079 }
23080diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23081index de54b9b..799051e 100644
23082--- a/arch/x86/mm/mmio-mod.c
23083+++ b/arch/x86/mm/mmio-mod.c
23084@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23085 break;
23086 default:
23087 {
23088- unsigned char *ip = (unsigned char *)instptr;
23089+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23090 my_trace->opcode = MMIO_UNKNOWN_OP;
23091 my_trace->width = 0;
23092 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23093@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23094 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23095 void __iomem *addr)
23096 {
23097- static atomic_t next_id;
23098+ static atomic_unchecked_t next_id;
23099 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23100 /* These are page-unaligned. */
23101 struct mmiotrace_map map = {
23102@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23103 .private = trace
23104 },
23105 .phys = offset,
23106- .id = atomic_inc_return(&next_id)
23107+ .id = atomic_inc_return_unchecked(&next_id)
23108 };
23109 map.map_id = trace->id;
23110
23111diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23112index b008656..773eac2 100644
23113--- a/arch/x86/mm/pageattr-test.c
23114+++ b/arch/x86/mm/pageattr-test.c
23115@@ -36,7 +36,7 @@ enum {
23116
23117 static int pte_testbit(pte_t pte)
23118 {
23119- return pte_flags(pte) & _PAGE_UNUSED1;
23120+ return pte_flags(pte) & _PAGE_CPA_TEST;
23121 }
23122
23123 struct split_state {
23124diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23125index f9e5267..6f6e27f 100644
23126--- a/arch/x86/mm/pageattr.c
23127+++ b/arch/x86/mm/pageattr.c
23128@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23129 */
23130 #ifdef CONFIG_PCI_BIOS
23131 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23132- pgprot_val(forbidden) |= _PAGE_NX;
23133+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23134 #endif
23135
23136 /*
23137@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23138 * Does not cover __inittext since that is gone later on. On
23139 * 64bit we do not enforce !NX on the low mapping
23140 */
23141- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23142- pgprot_val(forbidden) |= _PAGE_NX;
23143+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23144+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23145
23146+#ifdef CONFIG_DEBUG_RODATA
23147 /*
23148 * The .rodata section needs to be read-only. Using the pfn
23149 * catches all aliases.
23150@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23151 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23152 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23153 pgprot_val(forbidden) |= _PAGE_RW;
23154+#endif
23155
23156 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23157 /*
23158@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23159 }
23160 #endif
23161
23162+#ifdef CONFIG_PAX_KERNEXEC
23163+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23164+ pgprot_val(forbidden) |= _PAGE_RW;
23165+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23166+ }
23167+#endif
23168+
23169 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23170
23171 return prot;
23172@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23173 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23174 {
23175 /* change init_mm */
23176+ pax_open_kernel();
23177 set_pte_atomic(kpte, pte);
23178+
23179 #ifdef CONFIG_X86_32
23180 if (!SHARED_KERNEL_PMD) {
23181+
23182+#ifdef CONFIG_PAX_PER_CPU_PGD
23183+ unsigned long cpu;
23184+#else
23185 struct page *page;
23186+#endif
23187
23188+#ifdef CONFIG_PAX_PER_CPU_PGD
23189+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23190+ pgd_t *pgd = get_cpu_pgd(cpu);
23191+#else
23192 list_for_each_entry(page, &pgd_list, lru) {
23193- pgd_t *pgd;
23194+ pgd_t *pgd = (pgd_t *)page_address(page);
23195+#endif
23196+
23197 pud_t *pud;
23198 pmd_t *pmd;
23199
23200- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23201+ pgd += pgd_index(address);
23202 pud = pud_offset(pgd, address);
23203 pmd = pmd_offset(pud, address);
23204 set_pte_atomic((pte_t *)pmd, pte);
23205 }
23206 }
23207 #endif
23208+ pax_close_kernel();
23209 }
23210
23211 static int
23212diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23213index f6ff57b..481690f 100644
23214--- a/arch/x86/mm/pat.c
23215+++ b/arch/x86/mm/pat.c
23216@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23217
23218 if (!entry) {
23219 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23220- current->comm, current->pid, start, end);
23221+ current->comm, task_pid_nr(current), start, end);
23222 return -EINVAL;
23223 }
23224
23225@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23226 while (cursor < to) {
23227 if (!devmem_is_allowed(pfn)) {
23228 printk(KERN_INFO
23229- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23230- current->comm, from, to);
23231+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23232+ current->comm, from, to, cursor);
23233 return 0;
23234 }
23235 cursor += PAGE_SIZE;
23236@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23237 printk(KERN_INFO
23238 "%s:%d ioremap_change_attr failed %s "
23239 "for %Lx-%Lx\n",
23240- current->comm, current->pid,
23241+ current->comm, task_pid_nr(current),
23242 cattr_name(flags),
23243 base, (unsigned long long)(base + size));
23244 return -EINVAL;
23245@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23246 if (want_flags != flags) {
23247 printk(KERN_WARNING
23248 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23249- current->comm, current->pid,
23250+ current->comm, task_pid_nr(current),
23251 cattr_name(want_flags),
23252 (unsigned long long)paddr,
23253 (unsigned long long)(paddr + size),
23254@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23255 free_memtype(paddr, paddr + size);
23256 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23257 " for %Lx-%Lx, got %s\n",
23258- current->comm, current->pid,
23259+ current->comm, task_pid_nr(current),
23260 cattr_name(want_flags),
23261 (unsigned long long)paddr,
23262 (unsigned long long)(paddr + size),
23263diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23264index 9f0614d..92ae64a 100644
23265--- a/arch/x86/mm/pf_in.c
23266+++ b/arch/x86/mm/pf_in.c
23267@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23268 int i;
23269 enum reason_type rv = OTHERS;
23270
23271- p = (unsigned char *)ins_addr;
23272+ p = (unsigned char *)ktla_ktva(ins_addr);
23273 p += skip_prefix(p, &prf);
23274 p += get_opcode(p, &opcode);
23275
23276@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23277 struct prefix_bits prf;
23278 int i;
23279
23280- p = (unsigned char *)ins_addr;
23281+ p = (unsigned char *)ktla_ktva(ins_addr);
23282 p += skip_prefix(p, &prf);
23283 p += get_opcode(p, &opcode);
23284
23285@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23286 struct prefix_bits prf;
23287 int i;
23288
23289- p = (unsigned char *)ins_addr;
23290+ p = (unsigned char *)ktla_ktva(ins_addr);
23291 p += skip_prefix(p, &prf);
23292 p += get_opcode(p, &opcode);
23293
23294@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23295 struct prefix_bits prf;
23296 int i;
23297
23298- p = (unsigned char *)ins_addr;
23299+ p = (unsigned char *)ktla_ktva(ins_addr);
23300 p += skip_prefix(p, &prf);
23301 p += get_opcode(p, &opcode);
23302 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23303@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23304 struct prefix_bits prf;
23305 int i;
23306
23307- p = (unsigned char *)ins_addr;
23308+ p = (unsigned char *)ktla_ktva(ins_addr);
23309 p += skip_prefix(p, &prf);
23310 p += get_opcode(p, &opcode);
23311 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23312diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23313index 8573b83..6372501 100644
23314--- a/arch/x86/mm/pgtable.c
23315+++ b/arch/x86/mm/pgtable.c
23316@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23317 list_del(&page->lru);
23318 }
23319
23320-#define UNSHARED_PTRS_PER_PGD \
23321- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23322+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23323+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23324
23325+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23326+{
23327+ while (count--)
23328+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23329+}
23330+#endif
23331
23332+#ifdef CONFIG_PAX_PER_CPU_PGD
23333+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23334+{
23335+ while (count--)
23336+
23337+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23338+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23339+#else
23340+ *dst++ = *src++;
23341+#endif
23342+
23343+}
23344+#endif
23345+
23346+#ifdef CONFIG_X86_64
23347+#define pxd_t pud_t
23348+#define pyd_t pgd_t
23349+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23350+#define pxd_free(mm, pud) pud_free((mm), (pud))
23351+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23352+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23353+#define PYD_SIZE PGDIR_SIZE
23354+#else
23355+#define pxd_t pmd_t
23356+#define pyd_t pud_t
23357+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23358+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23359+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23360+#define pyd_offset(mm ,address) pud_offset((mm), (address))
23361+#define PYD_SIZE PUD_SIZE
23362+#endif
23363+
23364+#ifdef CONFIG_PAX_PER_CPU_PGD
23365+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23366+static inline void pgd_dtor(pgd_t *pgd) {}
23367+#else
23368 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23369 {
23370 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23371@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23372 pgd_list_del(pgd);
23373 spin_unlock(&pgd_lock);
23374 }
23375+#endif
23376
23377 /*
23378 * List of all pgd's needed for non-PAE so it can invalidate entries
23379@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23380 * -- wli
23381 */
23382
23383-#ifdef CONFIG_X86_PAE
23384+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23385 /*
23386 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23387 * updating the top-level pagetable entries to guarantee the
23388@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23389 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23390 * and initialize the kernel pmds here.
23391 */
23392-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23393+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23394
23395 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23396 {
23397@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23398 */
23399 flush_tlb_mm(mm);
23400 }
23401+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23402+#define PREALLOCATED_PXDS USER_PGD_PTRS
23403 #else /* !CONFIG_X86_PAE */
23404
23405 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23406-#define PREALLOCATED_PMDS 0
23407+#define PREALLOCATED_PXDS 0
23408
23409 #endif /* CONFIG_X86_PAE */
23410
23411-static void free_pmds(pmd_t *pmds[])
23412+static void free_pxds(pxd_t *pxds[])
23413 {
23414 int i;
23415
23416- for(i = 0; i < PREALLOCATED_PMDS; i++)
23417- if (pmds[i])
23418- free_page((unsigned long)pmds[i]);
23419+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23420+ if (pxds[i])
23421+ free_page((unsigned long)pxds[i]);
23422 }
23423
23424-static int preallocate_pmds(pmd_t *pmds[])
23425+static int preallocate_pxds(pxd_t *pxds[])
23426 {
23427 int i;
23428 bool failed = false;
23429
23430- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23431- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23432- if (pmd == NULL)
23433+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23434+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23435+ if (pxd == NULL)
23436 failed = true;
23437- pmds[i] = pmd;
23438+ pxds[i] = pxd;
23439 }
23440
23441 if (failed) {
23442- free_pmds(pmds);
23443+ free_pxds(pxds);
23444 return -ENOMEM;
23445 }
23446
23447@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23448 * preallocate which never got a corresponding vma will need to be
23449 * freed manually.
23450 */
23451-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23452+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23453 {
23454 int i;
23455
23456- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23457+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23458 pgd_t pgd = pgdp[i];
23459
23460 if (pgd_val(pgd) != 0) {
23461- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23462+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23463
23464- pgdp[i] = native_make_pgd(0);
23465+ set_pgd(pgdp + i, native_make_pgd(0));
23466
23467- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23468- pmd_free(mm, pmd);
23469+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23470+ pxd_free(mm, pxd);
23471 }
23472 }
23473 }
23474
23475-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23476+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23477 {
23478- pud_t *pud;
23479+ pyd_t *pyd;
23480 unsigned long addr;
23481 int i;
23482
23483- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23484+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23485 return;
23486
23487- pud = pud_offset(pgd, 0);
23488+#ifdef CONFIG_X86_64
23489+ pyd = pyd_offset(mm, 0L);
23490+#else
23491+ pyd = pyd_offset(pgd, 0L);
23492+#endif
23493
23494- for (addr = i = 0; i < PREALLOCATED_PMDS;
23495- i++, pud++, addr += PUD_SIZE) {
23496- pmd_t *pmd = pmds[i];
23497+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23498+ i++, pyd++, addr += PYD_SIZE) {
23499+ pxd_t *pxd = pxds[i];
23500
23501 if (i >= KERNEL_PGD_BOUNDARY)
23502- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23503- sizeof(pmd_t) * PTRS_PER_PMD);
23504+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23505+ sizeof(pxd_t) * PTRS_PER_PMD);
23506
23507- pud_populate(mm, pud, pmd);
23508+ pyd_populate(mm, pyd, pxd);
23509 }
23510 }
23511
23512 pgd_t *pgd_alloc(struct mm_struct *mm)
23513 {
23514 pgd_t *pgd;
23515- pmd_t *pmds[PREALLOCATED_PMDS];
23516+ pxd_t *pxds[PREALLOCATED_PXDS];
23517
23518 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23519
23520@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23521
23522 mm->pgd = pgd;
23523
23524- if (preallocate_pmds(pmds) != 0)
23525+ if (preallocate_pxds(pxds) != 0)
23526 goto out_free_pgd;
23527
23528 if (paravirt_pgd_alloc(mm) != 0)
23529- goto out_free_pmds;
23530+ goto out_free_pxds;
23531
23532 /*
23533 * Make sure that pre-populating the pmds is atomic with
23534@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23535 spin_lock(&pgd_lock);
23536
23537 pgd_ctor(mm, pgd);
23538- pgd_prepopulate_pmd(mm, pgd, pmds);
23539+ pgd_prepopulate_pxd(mm, pgd, pxds);
23540
23541 spin_unlock(&pgd_lock);
23542
23543 return pgd;
23544
23545-out_free_pmds:
23546- free_pmds(pmds);
23547+out_free_pxds:
23548+ free_pxds(pxds);
23549 out_free_pgd:
23550 free_page((unsigned long)pgd);
23551 out:
23552@@ -295,7 +344,7 @@ out:
23553
23554 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23555 {
23556- pgd_mop_up_pmds(mm, pgd);
23557+ pgd_mop_up_pxds(mm, pgd);
23558 pgd_dtor(pgd);
23559 paravirt_pgd_free(mm, pgd);
23560 free_page((unsigned long)pgd);
23561diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23562index cac7184..09a39fa 100644
23563--- a/arch/x86/mm/pgtable_32.c
23564+++ b/arch/x86/mm/pgtable_32.c
23565@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23566 return;
23567 }
23568 pte = pte_offset_kernel(pmd, vaddr);
23569+
23570+ pax_open_kernel();
23571 if (pte_val(pteval))
23572 set_pte_at(&init_mm, vaddr, pte, pteval);
23573 else
23574 pte_clear(&init_mm, vaddr, pte);
23575+ pax_close_kernel();
23576
23577 /*
23578 * It's enough to flush this one mapping.
23579diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23580index 410531d..0f16030 100644
23581--- a/arch/x86/mm/setup_nx.c
23582+++ b/arch/x86/mm/setup_nx.c
23583@@ -5,8 +5,10 @@
23584 #include <asm/pgtable.h>
23585 #include <asm/proto.h>
23586
23587+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23588 static int disable_nx __cpuinitdata;
23589
23590+#ifndef CONFIG_PAX_PAGEEXEC
23591 /*
23592 * noexec = on|off
23593 *
23594@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23595 return 0;
23596 }
23597 early_param("noexec", noexec_setup);
23598+#endif
23599+
23600+#endif
23601
23602 void __cpuinit x86_configure_nx(void)
23603 {
23604+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23605 if (cpu_has_nx && !disable_nx)
23606 __supported_pte_mask |= _PAGE_NX;
23607 else
23608+#endif
23609 __supported_pte_mask &= ~_PAGE_NX;
23610 }
23611
23612diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23613index d6c0418..06a0ad5 100644
23614--- a/arch/x86/mm/tlb.c
23615+++ b/arch/x86/mm/tlb.c
23616@@ -65,7 +65,11 @@ void leave_mm(int cpu)
23617 BUG();
23618 cpumask_clear_cpu(cpu,
23619 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23620+
23621+#ifndef CONFIG_PAX_PER_CPU_PGD
23622 load_cr3(swapper_pg_dir);
23623+#endif
23624+
23625 }
23626 EXPORT_SYMBOL_GPL(leave_mm);
23627
23628diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23629index 6687022..ceabcfa 100644
23630--- a/arch/x86/net/bpf_jit.S
23631+++ b/arch/x86/net/bpf_jit.S
23632@@ -9,6 +9,7 @@
23633 */
23634 #include <linux/linkage.h>
23635 #include <asm/dwarf2.h>
23636+#include <asm/alternative-asm.h>
23637
23638 /*
23639 * Calling convention :
23640@@ -35,6 +36,7 @@ sk_load_word:
23641 jle bpf_slow_path_word
23642 mov (SKBDATA,%rsi),%eax
23643 bswap %eax /* ntohl() */
23644+ pax_force_retaddr
23645 ret
23646
23647
23648@@ -53,6 +55,7 @@ sk_load_half:
23649 jle bpf_slow_path_half
23650 movzwl (SKBDATA,%rsi),%eax
23651 rol $8,%ax # ntohs()
23652+ pax_force_retaddr
23653 ret
23654
23655 sk_load_byte_ind:
23656@@ -66,6 +69,7 @@ sk_load_byte:
23657 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23658 jle bpf_slow_path_byte
23659 movzbl (SKBDATA,%rsi),%eax
23660+ pax_force_retaddr
23661 ret
23662
23663 /**
23664@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23665 movzbl (SKBDATA,%rsi),%ebx
23666 and $15,%bl
23667 shl $2,%bl
23668+ pax_force_retaddr
23669 ret
23670 CFI_ENDPROC
23671 ENDPROC(sk_load_byte_msh)
23672@@ -91,6 +96,7 @@ bpf_error:
23673 xor %eax,%eax
23674 mov -8(%rbp),%rbx
23675 leaveq
23676+ pax_force_retaddr
23677 ret
23678
23679 /* rsi contains offset and can be scratched */
23680@@ -113,6 +119,7 @@ bpf_slow_path_word:
23681 js bpf_error
23682 mov -12(%rbp),%eax
23683 bswap %eax
23684+ pax_force_retaddr
23685 ret
23686
23687 bpf_slow_path_half:
23688@@ -121,12 +128,14 @@ bpf_slow_path_half:
23689 mov -12(%rbp),%ax
23690 rol $8,%ax
23691 movzwl %ax,%eax
23692+ pax_force_retaddr
23693 ret
23694
23695 bpf_slow_path_byte:
23696 bpf_slow_path_common(1)
23697 js bpf_error
23698 movzbl -12(%rbp),%eax
23699+ pax_force_retaddr
23700 ret
23701
23702 bpf_slow_path_byte_msh:
23703@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23704 and $15,%al
23705 shl $2,%al
23706 xchg %eax,%ebx
23707+ pax_force_retaddr
23708 ret
23709diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23710index 7b65f75..63097f6 100644
23711--- a/arch/x86/net/bpf_jit_comp.c
23712+++ b/arch/x86/net/bpf_jit_comp.c
23713@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23714 set_fs(old_fs);
23715 }
23716
23717+struct bpf_jit_work {
23718+ struct work_struct work;
23719+ void *image;
23720+};
23721
23722 void bpf_jit_compile(struct sk_filter *fp)
23723 {
23724@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23725 if (addrs == NULL)
23726 return;
23727
23728+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23729+ if (!fp->work)
23730+ goto out;
23731+
23732 /* Before first pass, make a rough estimation of addrs[]
23733 * each bpf instruction is translated to less than 64 bytes
23734 */
23735@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23736 if (image) {
23737 if (unlikely(proglen + ilen > oldproglen)) {
23738 pr_err("bpb_jit_compile fatal error\n");
23739- kfree(addrs);
23740- module_free(NULL, image);
23741- return;
23742+ module_free_exec(NULL, image);
23743+ goto out;
23744 }
23745+ pax_open_kernel();
23746 memcpy(image + proglen, temp, ilen);
23747+ pax_close_kernel();
23748 }
23749 proglen += ilen;
23750 addrs[i] = proglen;
23751@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23752 break;
23753 }
23754 if (proglen == oldproglen) {
23755- image = module_alloc(max_t(unsigned int,
23756+ image = module_alloc_exec(max_t(unsigned int,
23757 proglen,
23758 sizeof(struct work_struct)));
23759 if (!image)
23760@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23761 fp->bpf_func = (void *)image;
23762 }
23763 out:
23764+ kfree(fp->work);
23765 kfree(addrs);
23766 return;
23767 }
23768
23769 static void jit_free_defer(struct work_struct *arg)
23770 {
23771- module_free(NULL, arg);
23772+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23773+ kfree(arg);
23774 }
23775
23776 /* run from softirq, we must use a work_struct to call
23777- * module_free() from process context
23778+ * module_free_exec() from process context
23779 */
23780 void bpf_jit_free(struct sk_filter *fp)
23781 {
23782 if (fp->bpf_func != sk_run_filter) {
23783- struct work_struct *work = (struct work_struct *)fp->bpf_func;
23784+ struct work_struct *work = &fp->work->work;
23785
23786 INIT_WORK(work, jit_free_defer);
23787+ fp->work->image = fp->bpf_func;
23788 schedule_work(work);
23789 }
23790 }
23791diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23792index bff89df..377758a 100644
23793--- a/arch/x86/oprofile/backtrace.c
23794+++ b/arch/x86/oprofile/backtrace.c
23795@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23796 struct stack_frame_ia32 *fp;
23797 unsigned long bytes;
23798
23799- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23800+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23801 if (bytes != sizeof(bufhead))
23802 return NULL;
23803
23804- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23805+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23806
23807 oprofile_add_trace(bufhead[0].return_address);
23808
23809@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23810 struct stack_frame bufhead[2];
23811 unsigned long bytes;
23812
23813- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23814+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23815 if (bytes != sizeof(bufhead))
23816 return NULL;
23817
23818@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23819 {
23820 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23821
23822- if (!user_mode_vm(regs)) {
23823+ if (!user_mode(regs)) {
23824 unsigned long stack = kernel_stack_pointer(regs);
23825 if (depth)
23826 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23827diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23828index cb29191..036766d 100644
23829--- a/arch/x86/pci/mrst.c
23830+++ b/arch/x86/pci/mrst.c
23831@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23832 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23833 pci_mmcfg_late_init();
23834 pcibios_enable_irq = mrst_pci_irq_enable;
23835- pci_root_ops = pci_mrst_ops;
23836+ pax_open_kernel();
23837+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23838+ pax_close_kernel();
23839 /* Continue with standard init */
23840 return 1;
23841 }
23842diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23843index db0e9a5..8844dea 100644
23844--- a/arch/x86/pci/pcbios.c
23845+++ b/arch/x86/pci/pcbios.c
23846@@ -79,50 +79,93 @@ union bios32 {
23847 static struct {
23848 unsigned long address;
23849 unsigned short segment;
23850-} bios32_indirect = { 0, __KERNEL_CS };
23851+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23852
23853 /*
23854 * Returns the entry point for the given service, NULL on error
23855 */
23856
23857-static unsigned long bios32_service(unsigned long service)
23858+static unsigned long __devinit bios32_service(unsigned long service)
23859 {
23860 unsigned char return_code; /* %al */
23861 unsigned long address; /* %ebx */
23862 unsigned long length; /* %ecx */
23863 unsigned long entry; /* %edx */
23864 unsigned long flags;
23865+ struct desc_struct d, *gdt;
23866
23867 local_irq_save(flags);
23868- __asm__("lcall *(%%edi); cld"
23869+
23870+ gdt = get_cpu_gdt_table(smp_processor_id());
23871+
23872+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23873+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23874+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23875+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23876+
23877+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23878 : "=a" (return_code),
23879 "=b" (address),
23880 "=c" (length),
23881 "=d" (entry)
23882 : "0" (service),
23883 "1" (0),
23884- "D" (&bios32_indirect));
23885+ "D" (&bios32_indirect),
23886+ "r"(__PCIBIOS_DS)
23887+ : "memory");
23888+
23889+ pax_open_kernel();
23890+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23891+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23892+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23893+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23894+ pax_close_kernel();
23895+
23896 local_irq_restore(flags);
23897
23898 switch (return_code) {
23899- case 0:
23900- return address + entry;
23901- case 0x80: /* Not present */
23902- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23903- return 0;
23904- default: /* Shouldn't happen */
23905- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23906- service, return_code);
23907+ case 0: {
23908+ int cpu;
23909+ unsigned char flags;
23910+
23911+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23912+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23913+ printk(KERN_WARNING "bios32_service: not valid\n");
23914 return 0;
23915+ }
23916+ address = address + PAGE_OFFSET;
23917+ length += 16UL; /* some BIOSs underreport this... */
23918+ flags = 4;
23919+ if (length >= 64*1024*1024) {
23920+ length >>= PAGE_SHIFT;
23921+ flags |= 8;
23922+ }
23923+
23924+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23925+ gdt = get_cpu_gdt_table(cpu);
23926+ pack_descriptor(&d, address, length, 0x9b, flags);
23927+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23928+ pack_descriptor(&d, address, length, 0x93, flags);
23929+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23930+ }
23931+ return entry;
23932+ }
23933+ case 0x80: /* Not present */
23934+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23935+ return 0;
23936+ default: /* Shouldn't happen */
23937+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23938+ service, return_code);
23939+ return 0;
23940 }
23941 }
23942
23943 static struct {
23944 unsigned long address;
23945 unsigned short segment;
23946-} pci_indirect = { 0, __KERNEL_CS };
23947+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23948
23949-static int pci_bios_present;
23950+static int pci_bios_present __read_only;
23951
23952 static int __devinit check_pcibios(void)
23953 {
23954@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23955 unsigned long flags, pcibios_entry;
23956
23957 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23958- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23959+ pci_indirect.address = pcibios_entry;
23960
23961 local_irq_save(flags);
23962- __asm__(
23963- "lcall *(%%edi); cld\n\t"
23964+ __asm__("movw %w6, %%ds\n\t"
23965+ "lcall *%%ss:(%%edi); cld\n\t"
23966+ "push %%ss\n\t"
23967+ "pop %%ds\n\t"
23968 "jc 1f\n\t"
23969 "xor %%ah, %%ah\n"
23970 "1:"
23971@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23972 "=b" (ebx),
23973 "=c" (ecx)
23974 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23975- "D" (&pci_indirect)
23976+ "D" (&pci_indirect),
23977+ "r" (__PCIBIOS_DS)
23978 : "memory");
23979 local_irq_restore(flags);
23980
23981@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23982
23983 switch (len) {
23984 case 1:
23985- __asm__("lcall *(%%esi); cld\n\t"
23986+ __asm__("movw %w6, %%ds\n\t"
23987+ "lcall *%%ss:(%%esi); cld\n\t"
23988+ "push %%ss\n\t"
23989+ "pop %%ds\n\t"
23990 "jc 1f\n\t"
23991 "xor %%ah, %%ah\n"
23992 "1:"
23993@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23994 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23995 "b" (bx),
23996 "D" ((long)reg),
23997- "S" (&pci_indirect));
23998+ "S" (&pci_indirect),
23999+ "r" (__PCIBIOS_DS));
24000 /*
24001 * Zero-extend the result beyond 8 bits, do not trust the
24002 * BIOS having done it:
24003@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24004 *value &= 0xff;
24005 break;
24006 case 2:
24007- __asm__("lcall *(%%esi); cld\n\t"
24008+ __asm__("movw %w6, %%ds\n\t"
24009+ "lcall *%%ss:(%%esi); cld\n\t"
24010+ "push %%ss\n\t"
24011+ "pop %%ds\n\t"
24012 "jc 1f\n\t"
24013 "xor %%ah, %%ah\n"
24014 "1:"
24015@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24016 : "1" (PCIBIOS_READ_CONFIG_WORD),
24017 "b" (bx),
24018 "D" ((long)reg),
24019- "S" (&pci_indirect));
24020+ "S" (&pci_indirect),
24021+ "r" (__PCIBIOS_DS));
24022 /*
24023 * Zero-extend the result beyond 16 bits, do not trust the
24024 * BIOS having done it:
24025@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24026 *value &= 0xffff;
24027 break;
24028 case 4:
24029- __asm__("lcall *(%%esi); cld\n\t"
24030+ __asm__("movw %w6, %%ds\n\t"
24031+ "lcall *%%ss:(%%esi); cld\n\t"
24032+ "push %%ss\n\t"
24033+ "pop %%ds\n\t"
24034 "jc 1f\n\t"
24035 "xor %%ah, %%ah\n"
24036 "1:"
24037@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24038 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24039 "b" (bx),
24040 "D" ((long)reg),
24041- "S" (&pci_indirect));
24042+ "S" (&pci_indirect),
24043+ "r" (__PCIBIOS_DS));
24044 break;
24045 }
24046
24047@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24048
24049 switch (len) {
24050 case 1:
24051- __asm__("lcall *(%%esi); cld\n\t"
24052+ __asm__("movw %w6, %%ds\n\t"
24053+ "lcall *%%ss:(%%esi); cld\n\t"
24054+ "push %%ss\n\t"
24055+ "pop %%ds\n\t"
24056 "jc 1f\n\t"
24057 "xor %%ah, %%ah\n"
24058 "1:"
24059@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24060 "c" (value),
24061 "b" (bx),
24062 "D" ((long)reg),
24063- "S" (&pci_indirect));
24064+ "S" (&pci_indirect),
24065+ "r" (__PCIBIOS_DS));
24066 break;
24067 case 2:
24068- __asm__("lcall *(%%esi); cld\n\t"
24069+ __asm__("movw %w6, %%ds\n\t"
24070+ "lcall *%%ss:(%%esi); cld\n\t"
24071+ "push %%ss\n\t"
24072+ "pop %%ds\n\t"
24073 "jc 1f\n\t"
24074 "xor %%ah, %%ah\n"
24075 "1:"
24076@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24077 "c" (value),
24078 "b" (bx),
24079 "D" ((long)reg),
24080- "S" (&pci_indirect));
24081+ "S" (&pci_indirect),
24082+ "r" (__PCIBIOS_DS));
24083 break;
24084 case 4:
24085- __asm__("lcall *(%%esi); cld\n\t"
24086+ __asm__("movw %w6, %%ds\n\t"
24087+ "lcall *%%ss:(%%esi); cld\n\t"
24088+ "push %%ss\n\t"
24089+ "pop %%ds\n\t"
24090 "jc 1f\n\t"
24091 "xor %%ah, %%ah\n"
24092 "1:"
24093@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24094 "c" (value),
24095 "b" (bx),
24096 "D" ((long)reg),
24097- "S" (&pci_indirect));
24098+ "S" (&pci_indirect),
24099+ "r" (__PCIBIOS_DS));
24100 break;
24101 }
24102
24103@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24104
24105 DBG("PCI: Fetching IRQ routing table... ");
24106 __asm__("push %%es\n\t"
24107+ "movw %w8, %%ds\n\t"
24108 "push %%ds\n\t"
24109 "pop %%es\n\t"
24110- "lcall *(%%esi); cld\n\t"
24111+ "lcall *%%ss:(%%esi); cld\n\t"
24112 "pop %%es\n\t"
24113+ "push %%ss\n\t"
24114+ "pop %%ds\n"
24115 "jc 1f\n\t"
24116 "xor %%ah, %%ah\n"
24117 "1:"
24118@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24119 "1" (0),
24120 "D" ((long) &opt),
24121 "S" (&pci_indirect),
24122- "m" (opt)
24123+ "m" (opt),
24124+ "r" (__PCIBIOS_DS)
24125 : "memory");
24126 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24127 if (ret & 0xff00)
24128@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24129 {
24130 int ret;
24131
24132- __asm__("lcall *(%%esi); cld\n\t"
24133+ __asm__("movw %w5, %%ds\n\t"
24134+ "lcall *%%ss:(%%esi); cld\n\t"
24135+ "push %%ss\n\t"
24136+ "pop %%ds\n"
24137 "jc 1f\n\t"
24138 "xor %%ah, %%ah\n"
24139 "1:"
24140@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24141 : "0" (PCIBIOS_SET_PCI_HW_INT),
24142 "b" ((dev->bus->number << 8) | dev->devfn),
24143 "c" ((irq << 8) | (pin + 10)),
24144- "S" (&pci_indirect));
24145+ "S" (&pci_indirect),
24146+ "r" (__PCIBIOS_DS));
24147 return !(ret & 0xff00);
24148 }
24149 EXPORT_SYMBOL(pcibios_set_irq_routing);
24150diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24151index 40e4469..1ab536e 100644
24152--- a/arch/x86/platform/efi/efi_32.c
24153+++ b/arch/x86/platform/efi/efi_32.c
24154@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24155 {
24156 struct desc_ptr gdt_descr;
24157
24158+#ifdef CONFIG_PAX_KERNEXEC
24159+ struct desc_struct d;
24160+#endif
24161+
24162 local_irq_save(efi_rt_eflags);
24163
24164 load_cr3(initial_page_table);
24165 __flush_tlb_all();
24166
24167+#ifdef CONFIG_PAX_KERNEXEC
24168+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24169+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24170+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24171+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24172+#endif
24173+
24174 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24175 gdt_descr.size = GDT_SIZE - 1;
24176 load_gdt(&gdt_descr);
24177@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24178 {
24179 struct desc_ptr gdt_descr;
24180
24181+#ifdef CONFIG_PAX_KERNEXEC
24182+ struct desc_struct d;
24183+
24184+ memset(&d, 0, sizeof d);
24185+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24186+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24187+#endif
24188+
24189 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24190 gdt_descr.size = GDT_SIZE - 1;
24191 load_gdt(&gdt_descr);
24192diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24193index fbe66e6..c5c0dd2 100644
24194--- a/arch/x86/platform/efi/efi_stub_32.S
24195+++ b/arch/x86/platform/efi/efi_stub_32.S
24196@@ -6,7 +6,9 @@
24197 */
24198
24199 #include <linux/linkage.h>
24200+#include <linux/init.h>
24201 #include <asm/page_types.h>
24202+#include <asm/segment.h>
24203
24204 /*
24205 * efi_call_phys(void *, ...) is a function with variable parameters.
24206@@ -20,7 +22,7 @@
24207 * service functions will comply with gcc calling convention, too.
24208 */
24209
24210-.text
24211+__INIT
24212 ENTRY(efi_call_phys)
24213 /*
24214 * 0. The function can only be called in Linux kernel. So CS has been
24215@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24216 * The mapping of lower virtual memory has been created in prelog and
24217 * epilog.
24218 */
24219- movl $1f, %edx
24220- subl $__PAGE_OFFSET, %edx
24221- jmp *%edx
24222+ movl $(__KERNEXEC_EFI_DS), %edx
24223+ mov %edx, %ds
24224+ mov %edx, %es
24225+ mov %edx, %ss
24226+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24227 1:
24228
24229 /*
24230@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24231 * parameter 2, ..., param n. To make things easy, we save the return
24232 * address of efi_call_phys in a global variable.
24233 */
24234- popl %edx
24235- movl %edx, saved_return_addr
24236- /* get the function pointer into ECX*/
24237- popl %ecx
24238- movl %ecx, efi_rt_function_ptr
24239- movl $2f, %edx
24240- subl $__PAGE_OFFSET, %edx
24241- pushl %edx
24242+ popl (saved_return_addr)
24243+ popl (efi_rt_function_ptr)
24244
24245 /*
24246 * 3. Clear PG bit in %CR0.
24247@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24248 /*
24249 * 5. Call the physical function.
24250 */
24251- jmp *%ecx
24252+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24253
24254-2:
24255 /*
24256 * 6. After EFI runtime service returns, control will return to
24257 * following instruction. We'd better readjust stack pointer first.
24258@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24259 movl %cr0, %edx
24260 orl $0x80000000, %edx
24261 movl %edx, %cr0
24262- jmp 1f
24263-1:
24264+
24265 /*
24266 * 8. Now restore the virtual mode from flat mode by
24267 * adding EIP with PAGE_OFFSET.
24268 */
24269- movl $1f, %edx
24270- jmp *%edx
24271+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24272 1:
24273+ movl $(__KERNEL_DS), %edx
24274+ mov %edx, %ds
24275+ mov %edx, %es
24276+ mov %edx, %ss
24277
24278 /*
24279 * 9. Balance the stack. And because EAX contain the return value,
24280 * we'd better not clobber it.
24281 */
24282- leal efi_rt_function_ptr, %edx
24283- movl (%edx), %ecx
24284- pushl %ecx
24285+ pushl (efi_rt_function_ptr)
24286
24287 /*
24288- * 10. Push the saved return address onto the stack and return.
24289+ * 10. Return to the saved return address.
24290 */
24291- leal saved_return_addr, %edx
24292- movl (%edx), %ecx
24293- pushl %ecx
24294- ret
24295+ jmpl *(saved_return_addr)
24296 ENDPROC(efi_call_phys)
24297 .previous
24298
24299-.data
24300+__INITDATA
24301 saved_return_addr:
24302 .long 0
24303 efi_rt_function_ptr:
24304diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24305index 4c07cca..2c8427d 100644
24306--- a/arch/x86/platform/efi/efi_stub_64.S
24307+++ b/arch/x86/platform/efi/efi_stub_64.S
24308@@ -7,6 +7,7 @@
24309 */
24310
24311 #include <linux/linkage.h>
24312+#include <asm/alternative-asm.h>
24313
24314 #define SAVE_XMM \
24315 mov %rsp, %rax; \
24316@@ -40,6 +41,7 @@ ENTRY(efi_call0)
24317 call *%rdi
24318 addq $32, %rsp
24319 RESTORE_XMM
24320+ pax_force_retaddr 0, 1
24321 ret
24322 ENDPROC(efi_call0)
24323
24324@@ -50,6 +52,7 @@ ENTRY(efi_call1)
24325 call *%rdi
24326 addq $32, %rsp
24327 RESTORE_XMM
24328+ pax_force_retaddr 0, 1
24329 ret
24330 ENDPROC(efi_call1)
24331
24332@@ -60,6 +63,7 @@ ENTRY(efi_call2)
24333 call *%rdi
24334 addq $32, %rsp
24335 RESTORE_XMM
24336+ pax_force_retaddr 0, 1
24337 ret
24338 ENDPROC(efi_call2)
24339
24340@@ -71,6 +75,7 @@ ENTRY(efi_call3)
24341 call *%rdi
24342 addq $32, %rsp
24343 RESTORE_XMM
24344+ pax_force_retaddr 0, 1
24345 ret
24346 ENDPROC(efi_call3)
24347
24348@@ -83,6 +88,7 @@ ENTRY(efi_call4)
24349 call *%rdi
24350 addq $32, %rsp
24351 RESTORE_XMM
24352+ pax_force_retaddr 0, 1
24353 ret
24354 ENDPROC(efi_call4)
24355
24356@@ -96,6 +102,7 @@ ENTRY(efi_call5)
24357 call *%rdi
24358 addq $48, %rsp
24359 RESTORE_XMM
24360+ pax_force_retaddr 0, 1
24361 ret
24362 ENDPROC(efi_call5)
24363
24364@@ -112,5 +119,6 @@ ENTRY(efi_call6)
24365 call *%rdi
24366 addq $48, %rsp
24367 RESTORE_XMM
24368+ pax_force_retaddr 0, 1
24369 ret
24370 ENDPROC(efi_call6)
24371diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24372index ad4ec1c..686479e 100644
24373--- a/arch/x86/platform/mrst/mrst.c
24374+++ b/arch/x86/platform/mrst/mrst.c
24375@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24376 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24377 int sfi_mrtc_num;
24378
24379-static void mrst_power_off(void)
24380+static __noreturn void mrst_power_off(void)
24381 {
24382 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24383 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24384+ BUG();
24385 }
24386
24387-static void mrst_reboot(void)
24388+static __noreturn void mrst_reboot(void)
24389 {
24390 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24391 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24392 else
24393 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24394+ BUG();
24395 }
24396
24397 /* parse all the mtimer info to a static mtimer array */
24398diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24399index f10c0af..3ec1f95 100644
24400--- a/arch/x86/power/cpu.c
24401+++ b/arch/x86/power/cpu.c
24402@@ -131,7 +131,7 @@ static void do_fpu_end(void)
24403 static void fix_processor_context(void)
24404 {
24405 int cpu = smp_processor_id();
24406- struct tss_struct *t = &per_cpu(init_tss, cpu);
24407+ struct tss_struct *t = init_tss + cpu;
24408
24409 set_tss_desc(cpu, t); /*
24410 * This just modifies memory; should not be
24411@@ -141,7 +141,9 @@ static void fix_processor_context(void)
24412 */
24413
24414 #ifdef CONFIG_X86_64
24415+ pax_open_kernel();
24416 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24417+ pax_close_kernel();
24418
24419 syscall_init(); /* This sets MSR_*STAR and related */
24420 #endif
24421diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24422index 5d17950..2253fc9 100644
24423--- a/arch/x86/vdso/Makefile
24424+++ b/arch/x86/vdso/Makefile
24425@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24426 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24427 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24428
24429-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24430+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24431 GCOV_PROFILE := n
24432
24433 #
24434diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24435index 468d591..8e80a0a 100644
24436--- a/arch/x86/vdso/vdso32-setup.c
24437+++ b/arch/x86/vdso/vdso32-setup.c
24438@@ -25,6 +25,7 @@
24439 #include <asm/tlbflush.h>
24440 #include <asm/vdso.h>
24441 #include <asm/proto.h>
24442+#include <asm/mman.h>
24443
24444 enum {
24445 VDSO_DISABLED = 0,
24446@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24447 void enable_sep_cpu(void)
24448 {
24449 int cpu = get_cpu();
24450- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24451+ struct tss_struct *tss = init_tss + cpu;
24452
24453 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24454 put_cpu();
24455@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24456 gate_vma.vm_start = FIXADDR_USER_START;
24457 gate_vma.vm_end = FIXADDR_USER_END;
24458 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24459- gate_vma.vm_page_prot = __P101;
24460+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24461 /*
24462 * Make sure the vDSO gets into every core dump.
24463 * Dumping its contents makes post-mortem fully interpretable later
24464@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24465 if (compat)
24466 addr = VDSO_HIGH_BASE;
24467 else {
24468- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24469+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24470 if (IS_ERR_VALUE(addr)) {
24471 ret = addr;
24472 goto up_fail;
24473 }
24474 }
24475
24476- current->mm->context.vdso = (void *)addr;
24477+ current->mm->context.vdso = addr;
24478
24479 if (compat_uses_vma || !compat) {
24480 /*
24481@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24482 }
24483
24484 current_thread_info()->sysenter_return =
24485- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24486+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24487
24488 up_fail:
24489 if (ret)
24490- current->mm->context.vdso = NULL;
24491+ current->mm->context.vdso = 0;
24492
24493 up_write(&mm->mmap_sem);
24494
24495@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24496
24497 const char *arch_vma_name(struct vm_area_struct *vma)
24498 {
24499- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24500+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24501 return "[vdso]";
24502+
24503+#ifdef CONFIG_PAX_SEGMEXEC
24504+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24505+ return "[vdso]";
24506+#endif
24507+
24508 return NULL;
24509 }
24510
24511@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24512 * Check to see if the corresponding task was created in compat vdso
24513 * mode.
24514 */
24515- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24516+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24517 return &gate_vma;
24518 return NULL;
24519 }
24520diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24521index 153407c..611cba9 100644
24522--- a/arch/x86/vdso/vma.c
24523+++ b/arch/x86/vdso/vma.c
24524@@ -16,8 +16,6 @@
24525 #include <asm/vdso.h>
24526 #include <asm/page.h>
24527
24528-unsigned int __read_mostly vdso_enabled = 1;
24529-
24530 extern char vdso_start[], vdso_end[];
24531 extern unsigned short vdso_sync_cpuid;
24532
24533@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24534 * unaligned here as a result of stack start randomization.
24535 */
24536 addr = PAGE_ALIGN(addr);
24537- addr = align_addr(addr, NULL, ALIGN_VDSO);
24538
24539 return addr;
24540 }
24541@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24542 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24543 {
24544 struct mm_struct *mm = current->mm;
24545- unsigned long addr;
24546+ unsigned long addr = 0;
24547 int ret;
24548
24549- if (!vdso_enabled)
24550- return 0;
24551-
24552 down_write(&mm->mmap_sem);
24553+
24554+#ifdef CONFIG_PAX_RANDMMAP
24555+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24556+#endif
24557+
24558 addr = vdso_addr(mm->start_stack, vdso_size);
24559+ addr = align_addr(addr, NULL, ALIGN_VDSO);
24560 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24561 if (IS_ERR_VALUE(addr)) {
24562 ret = addr;
24563 goto up_fail;
24564 }
24565
24566- current->mm->context.vdso = (void *)addr;
24567+ mm->context.vdso = addr;
24568
24569 ret = install_special_mapping(mm, addr, vdso_size,
24570 VM_READ|VM_EXEC|
24571 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24572 VM_ALWAYSDUMP,
24573 vdso_pages);
24574- if (ret) {
24575- current->mm->context.vdso = NULL;
24576- goto up_fail;
24577- }
24578+
24579+ if (ret)
24580+ mm->context.vdso = 0;
24581
24582 up_fail:
24583 up_write(&mm->mmap_sem);
24584 return ret;
24585 }
24586-
24587-static __init int vdso_setup(char *s)
24588-{
24589- vdso_enabled = simple_strtoul(s, NULL, 0);
24590- return 0;
24591-}
24592-__setup("vdso=", vdso_setup);
24593diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24594index 1f92865..c843b20 100644
24595--- a/arch/x86/xen/enlighten.c
24596+++ b/arch/x86/xen/enlighten.c
24597@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24598
24599 struct shared_info xen_dummy_shared_info;
24600
24601-void *xen_initial_gdt;
24602-
24603 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24604 __read_mostly int xen_have_vector_callback;
24605 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24606@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24607 #endif
24608 };
24609
24610-static void xen_reboot(int reason)
24611+static __noreturn void xen_reboot(int reason)
24612 {
24613 struct sched_shutdown r = { .reason = reason };
24614
24615@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24616 BUG();
24617 }
24618
24619-static void xen_restart(char *msg)
24620+static __noreturn void xen_restart(char *msg)
24621 {
24622 xen_reboot(SHUTDOWN_reboot);
24623 }
24624
24625-static void xen_emergency_restart(void)
24626+static __noreturn void xen_emergency_restart(void)
24627 {
24628 xen_reboot(SHUTDOWN_reboot);
24629 }
24630
24631-static void xen_machine_halt(void)
24632+static __noreturn void xen_machine_halt(void)
24633 {
24634 xen_reboot(SHUTDOWN_poweroff);
24635 }
24636@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24637 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24638
24639 /* Work out if we support NX */
24640- x86_configure_nx();
24641+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24642+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24643+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24644+ unsigned l, h;
24645+
24646+ __supported_pte_mask |= _PAGE_NX;
24647+ rdmsr(MSR_EFER, l, h);
24648+ l |= EFER_NX;
24649+ wrmsr(MSR_EFER, l, h);
24650+ }
24651+#endif
24652
24653 xen_setup_features();
24654
24655@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24656
24657 machine_ops = xen_machine_ops;
24658
24659- /*
24660- * The only reliable way to retain the initial address of the
24661- * percpu gdt_page is to remember it here, so we can go and
24662- * mark it RW later, when the initial percpu area is freed.
24663- */
24664- xen_initial_gdt = &per_cpu(gdt_page, 0);
24665-
24666 xen_smp_init();
24667
24668 #ifdef CONFIG_ACPI_NUMA
24669diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24670index 87f6673..e2555a6 100644
24671--- a/arch/x86/xen/mmu.c
24672+++ b/arch/x86/xen/mmu.c
24673@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24674 convert_pfn_mfn(init_level4_pgt);
24675 convert_pfn_mfn(level3_ident_pgt);
24676 convert_pfn_mfn(level3_kernel_pgt);
24677+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24678+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24679+ convert_pfn_mfn(level3_vmemmap_pgt);
24680
24681 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24682 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24683@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24684 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24685 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24686 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24687+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24688+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24689+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24690 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24691+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24692 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24693 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24694
24695@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24696 pv_mmu_ops.set_pud = xen_set_pud;
24697 #if PAGETABLE_LEVELS == 4
24698 pv_mmu_ops.set_pgd = xen_set_pgd;
24699+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24700 #endif
24701
24702 /* This will work as long as patching hasn't happened yet
24703@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24704 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24705 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24706 .set_pgd = xen_set_pgd_hyper,
24707+ .set_pgd_batched = xen_set_pgd_hyper,
24708
24709 .alloc_pud = xen_alloc_pmd_init,
24710 .release_pud = xen_release_pmd_init,
24711diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24712index 041d4fe..7666b7e 100644
24713--- a/arch/x86/xen/smp.c
24714+++ b/arch/x86/xen/smp.c
24715@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24716 {
24717 BUG_ON(smp_processor_id() != 0);
24718 native_smp_prepare_boot_cpu();
24719-
24720- /* We've switched to the "real" per-cpu gdt, so make sure the
24721- old memory can be recycled */
24722- make_lowmem_page_readwrite(xen_initial_gdt);
24723-
24724 xen_filter_cpu_maps();
24725 xen_setup_vcpu_info_placement();
24726 }
24727@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24728 gdt = get_cpu_gdt_table(cpu);
24729
24730 ctxt->flags = VGCF_IN_KERNEL;
24731- ctxt->user_regs.ds = __USER_DS;
24732- ctxt->user_regs.es = __USER_DS;
24733+ ctxt->user_regs.ds = __KERNEL_DS;
24734+ ctxt->user_regs.es = __KERNEL_DS;
24735 ctxt->user_regs.ss = __KERNEL_DS;
24736 #ifdef CONFIG_X86_32
24737 ctxt->user_regs.fs = __KERNEL_PERCPU;
24738- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24739+ savesegment(gs, ctxt->user_regs.gs);
24740 #else
24741 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24742 #endif
24743@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24744 int rc;
24745
24746 per_cpu(current_task, cpu) = idle;
24747+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24748 #ifdef CONFIG_X86_32
24749 irq_ctx_init(cpu);
24750 #else
24751 clear_tsk_thread_flag(idle, TIF_FORK);
24752- per_cpu(kernel_stack, cpu) =
24753- (unsigned long)task_stack_page(idle) -
24754- KERNEL_STACK_OFFSET + THREAD_SIZE;
24755+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24756 #endif
24757 xen_setup_runstate_info(cpu);
24758 xen_setup_timer(cpu);
24759diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24760index b040b0e..8cc4fe0 100644
24761--- a/arch/x86/xen/xen-asm_32.S
24762+++ b/arch/x86/xen/xen-asm_32.S
24763@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24764 ESP_OFFSET=4 # bytes pushed onto stack
24765
24766 /*
24767- * Store vcpu_info pointer for easy access. Do it this way to
24768- * avoid having to reload %fs
24769+ * Store vcpu_info pointer for easy access.
24770 */
24771 #ifdef CONFIG_SMP
24772- GET_THREAD_INFO(%eax)
24773- movl TI_cpu(%eax), %eax
24774- movl __per_cpu_offset(,%eax,4), %eax
24775- mov xen_vcpu(%eax), %eax
24776+ push %fs
24777+ mov $(__KERNEL_PERCPU), %eax
24778+ mov %eax, %fs
24779+ mov PER_CPU_VAR(xen_vcpu), %eax
24780+ pop %fs
24781 #else
24782 movl xen_vcpu, %eax
24783 #endif
24784diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24785index aaa7291..3f77960 100644
24786--- a/arch/x86/xen/xen-head.S
24787+++ b/arch/x86/xen/xen-head.S
24788@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24789 #ifdef CONFIG_X86_32
24790 mov %esi,xen_start_info
24791 mov $init_thread_union+THREAD_SIZE,%esp
24792+#ifdef CONFIG_SMP
24793+ movl $cpu_gdt_table,%edi
24794+ movl $__per_cpu_load,%eax
24795+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24796+ rorl $16,%eax
24797+ movb %al,__KERNEL_PERCPU + 4(%edi)
24798+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24799+ movl $__per_cpu_end - 1,%eax
24800+ subl $__per_cpu_start,%eax
24801+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24802+#endif
24803 #else
24804 mov %rsi,xen_start_info
24805 mov $init_thread_union+THREAD_SIZE,%rsp
24806diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24807index b095739..8c17bcd 100644
24808--- a/arch/x86/xen/xen-ops.h
24809+++ b/arch/x86/xen/xen-ops.h
24810@@ -10,8 +10,6 @@
24811 extern const char xen_hypervisor_callback[];
24812 extern const char xen_failsafe_callback[];
24813
24814-extern void *xen_initial_gdt;
24815-
24816 struct trap_info;
24817 void xen_copy_trap_info(struct trap_info *traps);
24818
24819diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24820index 58916af..9cb880b 100644
24821--- a/block/blk-iopoll.c
24822+++ b/block/blk-iopoll.c
24823@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24824 }
24825 EXPORT_SYMBOL(blk_iopoll_complete);
24826
24827-static void blk_iopoll_softirq(struct softirq_action *h)
24828+static void blk_iopoll_softirq(void)
24829 {
24830 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24831 int rearm = 0, budget = blk_iopoll_budget;
24832diff --git a/block/blk-map.c b/block/blk-map.c
24833index 623e1cd..ca1e109 100644
24834--- a/block/blk-map.c
24835+++ b/block/blk-map.c
24836@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24837 if (!len || !kbuf)
24838 return -EINVAL;
24839
24840- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24841+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24842 if (do_copy)
24843 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24844 else
24845diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24846index 1366a89..e17f54b 100644
24847--- a/block/blk-softirq.c
24848+++ b/block/blk-softirq.c
24849@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24850 * Softirq action handler - move entries to local list and loop over them
24851 * while passing them to the queue registered handler.
24852 */
24853-static void blk_done_softirq(struct softirq_action *h)
24854+static void blk_done_softirq(void)
24855 {
24856 struct list_head *cpu_list, local_list;
24857
24858diff --git a/block/bsg.c b/block/bsg.c
24859index 702f131..37808bf 100644
24860--- a/block/bsg.c
24861+++ b/block/bsg.c
24862@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24863 struct sg_io_v4 *hdr, struct bsg_device *bd,
24864 fmode_t has_write_perm)
24865 {
24866+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24867+ unsigned char *cmdptr;
24868+
24869 if (hdr->request_len > BLK_MAX_CDB) {
24870 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24871 if (!rq->cmd)
24872 return -ENOMEM;
24873- }
24874+ cmdptr = rq->cmd;
24875+ } else
24876+ cmdptr = tmpcmd;
24877
24878- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24879+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24880 hdr->request_len))
24881 return -EFAULT;
24882
24883+ if (cmdptr != rq->cmd)
24884+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24885+
24886 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24887 if (blk_verify_command(rq->cmd, has_write_perm))
24888 return -EPERM;
24889diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24890index 7b72502..646105c 100644
24891--- a/block/compat_ioctl.c
24892+++ b/block/compat_ioctl.c
24893@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24894 err |= __get_user(f->spec1, &uf->spec1);
24895 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24896 err |= __get_user(name, &uf->name);
24897- f->name = compat_ptr(name);
24898+ f->name = (void __force_kernel *)compat_ptr(name);
24899 if (err) {
24900 err = -EFAULT;
24901 goto out;
24902diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24903index 688be8a..8a37d98 100644
24904--- a/block/scsi_ioctl.c
24905+++ b/block/scsi_ioctl.c
24906@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24907 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24908 struct sg_io_hdr *hdr, fmode_t mode)
24909 {
24910- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24911+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24912+ unsigned char *cmdptr;
24913+
24914+ if (rq->cmd != rq->__cmd)
24915+ cmdptr = rq->cmd;
24916+ else
24917+ cmdptr = tmpcmd;
24918+
24919+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24920 return -EFAULT;
24921+
24922+ if (cmdptr != rq->cmd)
24923+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24924+
24925 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24926 return -EPERM;
24927
24928@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24929 int err;
24930 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24931 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24932+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24933+ unsigned char *cmdptr;
24934
24935 if (!sic)
24936 return -EINVAL;
24937@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24938 */
24939 err = -EFAULT;
24940 rq->cmd_len = cmdlen;
24941- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24942+
24943+ if (rq->cmd != rq->__cmd)
24944+ cmdptr = rq->cmd;
24945+ else
24946+ cmdptr = tmpcmd;
24947+
24948+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24949 goto error;
24950
24951+ if (rq->cmd != cmdptr)
24952+ memcpy(rq->cmd, cmdptr, cmdlen);
24953+
24954 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24955 goto error;
24956
24957diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24958index 671d4d6..5f24030 100644
24959--- a/crypto/cryptd.c
24960+++ b/crypto/cryptd.c
24961@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24962
24963 struct cryptd_blkcipher_request_ctx {
24964 crypto_completion_t complete;
24965-};
24966+} __no_const;
24967
24968 struct cryptd_hash_ctx {
24969 struct crypto_shash *child;
24970@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24971
24972 struct cryptd_aead_request_ctx {
24973 crypto_completion_t complete;
24974-};
24975+} __no_const;
24976
24977 static void cryptd_queue_worker(struct work_struct *work);
24978
24979diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
24980index 5d41894..22021e4 100644
24981--- a/drivers/acpi/apei/cper.c
24982+++ b/drivers/acpi/apei/cper.c
24983@@ -38,12 +38,12 @@
24984 */
24985 u64 cper_next_record_id(void)
24986 {
24987- static atomic64_t seq;
24988+ static atomic64_unchecked_t seq;
24989
24990- if (!atomic64_read(&seq))
24991- atomic64_set(&seq, ((u64)get_seconds()) << 32);
24992+ if (!atomic64_read_unchecked(&seq))
24993+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
24994
24995- return atomic64_inc_return(&seq);
24996+ return atomic64_inc_return_unchecked(&seq);
24997 }
24998 EXPORT_SYMBOL_GPL(cper_next_record_id);
24999
25000diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25001index 6c47ae9..8ab9132 100644
25002--- a/drivers/acpi/ec_sys.c
25003+++ b/drivers/acpi/ec_sys.c
25004@@ -12,6 +12,7 @@
25005 #include <linux/acpi.h>
25006 #include <linux/debugfs.h>
25007 #include <linux/module.h>
25008+#include <asm/uaccess.h>
25009 #include "internal.h"
25010
25011 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25012@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25013 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25014 */
25015 unsigned int size = EC_SPACE_SIZE;
25016- u8 *data = (u8 *) buf;
25017+ u8 data;
25018 loff_t init_off = *off;
25019 int err = 0;
25020
25021@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25022 size = count;
25023
25024 while (size) {
25025- err = ec_read(*off, &data[*off - init_off]);
25026+ err = ec_read(*off, &data);
25027 if (err)
25028 return err;
25029+ if (put_user(data, &buf[*off - init_off]))
25030+ return -EFAULT;
25031 *off += 1;
25032 size--;
25033 }
25034@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25035
25036 unsigned int size = count;
25037 loff_t init_off = *off;
25038- u8 *data = (u8 *) buf;
25039 int err = 0;
25040
25041 if (*off >= EC_SPACE_SIZE)
25042@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25043 }
25044
25045 while (size) {
25046- u8 byte_write = data[*off - init_off];
25047+ u8 byte_write;
25048+ if (get_user(byte_write, &buf[*off - init_off]))
25049+ return -EFAULT;
25050 err = ec_write(*off, byte_write);
25051 if (err)
25052 return err;
25053diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25054index 251c7b62..000462d 100644
25055--- a/drivers/acpi/proc.c
25056+++ b/drivers/acpi/proc.c
25057@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25058 size_t count, loff_t * ppos)
25059 {
25060 struct list_head *node, *next;
25061- char strbuf[5];
25062- char str[5] = "";
25063- unsigned int len = count;
25064+ char strbuf[5] = {0};
25065
25066- if (len > 4)
25067- len = 4;
25068- if (len < 0)
25069+ if (count > 4)
25070+ count = 4;
25071+ if (copy_from_user(strbuf, buffer, count))
25072 return -EFAULT;
25073-
25074- if (copy_from_user(strbuf, buffer, len))
25075- return -EFAULT;
25076- strbuf[len] = '\0';
25077- sscanf(strbuf, "%s", str);
25078+ strbuf[count] = '\0';
25079
25080 mutex_lock(&acpi_device_lock);
25081 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25082@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25083 if (!dev->wakeup.flags.valid)
25084 continue;
25085
25086- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25087+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25088 if (device_can_wakeup(&dev->dev)) {
25089 bool enable = !device_may_wakeup(&dev->dev);
25090 device_set_wakeup_enable(&dev->dev, enable);
25091diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25092index 9d7bc9f..a6fc091 100644
25093--- a/drivers/acpi/processor_driver.c
25094+++ b/drivers/acpi/processor_driver.c
25095@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25096 return 0;
25097 #endif
25098
25099- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25100+ BUG_ON(pr->id >= nr_cpu_ids);
25101
25102 /*
25103 * Buggy BIOS check
25104diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25105index c04ad68..0b99473 100644
25106--- a/drivers/ata/libata-core.c
25107+++ b/drivers/ata/libata-core.c
25108@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25109 struct ata_port *ap;
25110 unsigned int tag;
25111
25112- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25113+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25114 ap = qc->ap;
25115
25116 qc->flags = 0;
25117@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25118 struct ata_port *ap;
25119 struct ata_link *link;
25120
25121- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25122+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25123 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25124 ap = qc->ap;
25125 link = qc->dev->link;
25126@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25127 return;
25128
25129 spin_lock(&lock);
25130+ pax_open_kernel();
25131
25132 for (cur = ops->inherits; cur; cur = cur->inherits) {
25133 void **inherit = (void **)cur;
25134@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25135 if (IS_ERR(*pp))
25136 *pp = NULL;
25137
25138- ops->inherits = NULL;
25139+ *(struct ata_port_operations **)&ops->inherits = NULL;
25140
25141+ pax_close_kernel();
25142 spin_unlock(&lock);
25143 }
25144
25145diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25146index e8574bb..f9f6a72 100644
25147--- a/drivers/ata/pata_arasan_cf.c
25148+++ b/drivers/ata/pata_arasan_cf.c
25149@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25150 /* Handle platform specific quirks */
25151 if (pdata->quirk) {
25152 if (pdata->quirk & CF_BROKEN_PIO) {
25153- ap->ops->set_piomode = NULL;
25154+ pax_open_kernel();
25155+ *(void **)&ap->ops->set_piomode = NULL;
25156+ pax_close_kernel();
25157 ap->pio_mask = 0;
25158 }
25159 if (pdata->quirk & CF_BROKEN_MWDMA)
25160diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25161index f9b983a..887b9d8 100644
25162--- a/drivers/atm/adummy.c
25163+++ b/drivers/atm/adummy.c
25164@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25165 vcc->pop(vcc, skb);
25166 else
25167 dev_kfree_skb_any(skb);
25168- atomic_inc(&vcc->stats->tx);
25169+ atomic_inc_unchecked(&vcc->stats->tx);
25170
25171 return 0;
25172 }
25173diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25174index f8f41e0..1f987dd 100644
25175--- a/drivers/atm/ambassador.c
25176+++ b/drivers/atm/ambassador.c
25177@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25178 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25179
25180 // VC layer stats
25181- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25182+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25183
25184 // free the descriptor
25185 kfree (tx_descr);
25186@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25187 dump_skb ("<<<", vc, skb);
25188
25189 // VC layer stats
25190- atomic_inc(&atm_vcc->stats->rx);
25191+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25192 __net_timestamp(skb);
25193 // end of our responsibility
25194 atm_vcc->push (atm_vcc, skb);
25195@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25196 } else {
25197 PRINTK (KERN_INFO, "dropped over-size frame");
25198 // should we count this?
25199- atomic_inc(&atm_vcc->stats->rx_drop);
25200+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25201 }
25202
25203 } else {
25204@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25205 }
25206
25207 if (check_area (skb->data, skb->len)) {
25208- atomic_inc(&atm_vcc->stats->tx_err);
25209+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25210 return -ENOMEM; // ?
25211 }
25212
25213diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25214index b22d71c..d6e1049 100644
25215--- a/drivers/atm/atmtcp.c
25216+++ b/drivers/atm/atmtcp.c
25217@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25218 if (vcc->pop) vcc->pop(vcc,skb);
25219 else dev_kfree_skb(skb);
25220 if (dev_data) return 0;
25221- atomic_inc(&vcc->stats->tx_err);
25222+ atomic_inc_unchecked(&vcc->stats->tx_err);
25223 return -ENOLINK;
25224 }
25225 size = skb->len+sizeof(struct atmtcp_hdr);
25226@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25227 if (!new_skb) {
25228 if (vcc->pop) vcc->pop(vcc,skb);
25229 else dev_kfree_skb(skb);
25230- atomic_inc(&vcc->stats->tx_err);
25231+ atomic_inc_unchecked(&vcc->stats->tx_err);
25232 return -ENOBUFS;
25233 }
25234 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25235@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25236 if (vcc->pop) vcc->pop(vcc,skb);
25237 else dev_kfree_skb(skb);
25238 out_vcc->push(out_vcc,new_skb);
25239- atomic_inc(&vcc->stats->tx);
25240- atomic_inc(&out_vcc->stats->rx);
25241+ atomic_inc_unchecked(&vcc->stats->tx);
25242+ atomic_inc_unchecked(&out_vcc->stats->rx);
25243 return 0;
25244 }
25245
25246@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25247 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25248 read_unlock(&vcc_sklist_lock);
25249 if (!out_vcc) {
25250- atomic_inc(&vcc->stats->tx_err);
25251+ atomic_inc_unchecked(&vcc->stats->tx_err);
25252 goto done;
25253 }
25254 skb_pull(skb,sizeof(struct atmtcp_hdr));
25255@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25256 __net_timestamp(new_skb);
25257 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25258 out_vcc->push(out_vcc,new_skb);
25259- atomic_inc(&vcc->stats->tx);
25260- atomic_inc(&out_vcc->stats->rx);
25261+ atomic_inc_unchecked(&vcc->stats->tx);
25262+ atomic_inc_unchecked(&out_vcc->stats->rx);
25263 done:
25264 if (vcc->pop) vcc->pop(vcc,skb);
25265 else dev_kfree_skb(skb);
25266diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25267index 956e9ac..133516d 100644
25268--- a/drivers/atm/eni.c
25269+++ b/drivers/atm/eni.c
25270@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25271 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25272 vcc->dev->number);
25273 length = 0;
25274- atomic_inc(&vcc->stats->rx_err);
25275+ atomic_inc_unchecked(&vcc->stats->rx_err);
25276 }
25277 else {
25278 length = ATM_CELL_SIZE-1; /* no HEC */
25279@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25280 size);
25281 }
25282 eff = length = 0;
25283- atomic_inc(&vcc->stats->rx_err);
25284+ atomic_inc_unchecked(&vcc->stats->rx_err);
25285 }
25286 else {
25287 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25288@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25289 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25290 vcc->dev->number,vcc->vci,length,size << 2,descr);
25291 length = eff = 0;
25292- atomic_inc(&vcc->stats->rx_err);
25293+ atomic_inc_unchecked(&vcc->stats->rx_err);
25294 }
25295 }
25296 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25297@@ -771,7 +771,7 @@ rx_dequeued++;
25298 vcc->push(vcc,skb);
25299 pushed++;
25300 }
25301- atomic_inc(&vcc->stats->rx);
25302+ atomic_inc_unchecked(&vcc->stats->rx);
25303 }
25304 wake_up(&eni_dev->rx_wait);
25305 }
25306@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25307 PCI_DMA_TODEVICE);
25308 if (vcc->pop) vcc->pop(vcc,skb);
25309 else dev_kfree_skb_irq(skb);
25310- atomic_inc(&vcc->stats->tx);
25311+ atomic_inc_unchecked(&vcc->stats->tx);
25312 wake_up(&eni_dev->tx_wait);
25313 dma_complete++;
25314 }
25315@@ -1569,7 +1569,7 @@ tx_complete++;
25316 /*--------------------------------- entries ---------------------------------*/
25317
25318
25319-static const char *media_name[] __devinitdata = {
25320+static const char *media_name[] __devinitconst = {
25321 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25322 "UTP", "05?", "06?", "07?", /* 4- 7 */
25323 "TAXI","09?", "10?", "11?", /* 8-11 */
25324diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25325index 5072f8a..fa52520d 100644
25326--- a/drivers/atm/firestream.c
25327+++ b/drivers/atm/firestream.c
25328@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25329 }
25330 }
25331
25332- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25333+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25334
25335 fs_dprintk (FS_DEBUG_TXMEM, "i");
25336 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25337@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25338 #endif
25339 skb_put (skb, qe->p1 & 0xffff);
25340 ATM_SKB(skb)->vcc = atm_vcc;
25341- atomic_inc(&atm_vcc->stats->rx);
25342+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25343 __net_timestamp(skb);
25344 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25345 atm_vcc->push (atm_vcc, skb);
25346@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25347 kfree (pe);
25348 }
25349 if (atm_vcc)
25350- atomic_inc(&atm_vcc->stats->rx_drop);
25351+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25352 break;
25353 case 0x1f: /* Reassembly abort: no buffers. */
25354 /* Silently increment error counter. */
25355 if (atm_vcc)
25356- atomic_inc(&atm_vcc->stats->rx_drop);
25357+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25358 break;
25359 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25360 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25361diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25362index 361f5ae..7fc552d 100644
25363--- a/drivers/atm/fore200e.c
25364+++ b/drivers/atm/fore200e.c
25365@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25366 #endif
25367 /* check error condition */
25368 if (*entry->status & STATUS_ERROR)
25369- atomic_inc(&vcc->stats->tx_err);
25370+ atomic_inc_unchecked(&vcc->stats->tx_err);
25371 else
25372- atomic_inc(&vcc->stats->tx);
25373+ atomic_inc_unchecked(&vcc->stats->tx);
25374 }
25375 }
25376
25377@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25378 if (skb == NULL) {
25379 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25380
25381- atomic_inc(&vcc->stats->rx_drop);
25382+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25383 return -ENOMEM;
25384 }
25385
25386@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25387
25388 dev_kfree_skb_any(skb);
25389
25390- atomic_inc(&vcc->stats->rx_drop);
25391+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25392 return -ENOMEM;
25393 }
25394
25395 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25396
25397 vcc->push(vcc, skb);
25398- atomic_inc(&vcc->stats->rx);
25399+ atomic_inc_unchecked(&vcc->stats->rx);
25400
25401 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25402
25403@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25404 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25405 fore200e->atm_dev->number,
25406 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25407- atomic_inc(&vcc->stats->rx_err);
25408+ atomic_inc_unchecked(&vcc->stats->rx_err);
25409 }
25410 }
25411
25412@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25413 goto retry_here;
25414 }
25415
25416- atomic_inc(&vcc->stats->tx_err);
25417+ atomic_inc_unchecked(&vcc->stats->tx_err);
25418
25419 fore200e->tx_sat++;
25420 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25421diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25422index 9a51df4..f3bb5f8 100644
25423--- a/drivers/atm/he.c
25424+++ b/drivers/atm/he.c
25425@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25426
25427 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25428 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25429- atomic_inc(&vcc->stats->rx_drop);
25430+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25431 goto return_host_buffers;
25432 }
25433
25434@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25435 RBRQ_LEN_ERR(he_dev->rbrq_head)
25436 ? "LEN_ERR" : "",
25437 vcc->vpi, vcc->vci);
25438- atomic_inc(&vcc->stats->rx_err);
25439+ atomic_inc_unchecked(&vcc->stats->rx_err);
25440 goto return_host_buffers;
25441 }
25442
25443@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25444 vcc->push(vcc, skb);
25445 spin_lock(&he_dev->global_lock);
25446
25447- atomic_inc(&vcc->stats->rx);
25448+ atomic_inc_unchecked(&vcc->stats->rx);
25449
25450 return_host_buffers:
25451 ++pdus_assembled;
25452@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25453 tpd->vcc->pop(tpd->vcc, tpd->skb);
25454 else
25455 dev_kfree_skb_any(tpd->skb);
25456- atomic_inc(&tpd->vcc->stats->tx_err);
25457+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25458 }
25459 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25460 return;
25461@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25462 vcc->pop(vcc, skb);
25463 else
25464 dev_kfree_skb_any(skb);
25465- atomic_inc(&vcc->stats->tx_err);
25466+ atomic_inc_unchecked(&vcc->stats->tx_err);
25467 return -EINVAL;
25468 }
25469
25470@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25471 vcc->pop(vcc, skb);
25472 else
25473 dev_kfree_skb_any(skb);
25474- atomic_inc(&vcc->stats->tx_err);
25475+ atomic_inc_unchecked(&vcc->stats->tx_err);
25476 return -EINVAL;
25477 }
25478 #endif
25479@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25480 vcc->pop(vcc, skb);
25481 else
25482 dev_kfree_skb_any(skb);
25483- atomic_inc(&vcc->stats->tx_err);
25484+ atomic_inc_unchecked(&vcc->stats->tx_err);
25485 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25486 return -ENOMEM;
25487 }
25488@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25489 vcc->pop(vcc, skb);
25490 else
25491 dev_kfree_skb_any(skb);
25492- atomic_inc(&vcc->stats->tx_err);
25493+ atomic_inc_unchecked(&vcc->stats->tx_err);
25494 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25495 return -ENOMEM;
25496 }
25497@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25498 __enqueue_tpd(he_dev, tpd, cid);
25499 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25500
25501- atomic_inc(&vcc->stats->tx);
25502+ atomic_inc_unchecked(&vcc->stats->tx);
25503
25504 return 0;
25505 }
25506diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25507index b812103..e391a49 100644
25508--- a/drivers/atm/horizon.c
25509+++ b/drivers/atm/horizon.c
25510@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25511 {
25512 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25513 // VC layer stats
25514- atomic_inc(&vcc->stats->rx);
25515+ atomic_inc_unchecked(&vcc->stats->rx);
25516 __net_timestamp(skb);
25517 // end of our responsibility
25518 vcc->push (vcc, skb);
25519@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25520 dev->tx_iovec = NULL;
25521
25522 // VC layer stats
25523- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25524+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25525
25526 // free the skb
25527 hrz_kfree_skb (skb);
25528diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25529index 1c05212..c28e200 100644
25530--- a/drivers/atm/idt77252.c
25531+++ b/drivers/atm/idt77252.c
25532@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25533 else
25534 dev_kfree_skb(skb);
25535
25536- atomic_inc(&vcc->stats->tx);
25537+ atomic_inc_unchecked(&vcc->stats->tx);
25538 }
25539
25540 atomic_dec(&scq->used);
25541@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25542 if ((sb = dev_alloc_skb(64)) == NULL) {
25543 printk("%s: Can't allocate buffers for aal0.\n",
25544 card->name);
25545- atomic_add(i, &vcc->stats->rx_drop);
25546+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25547 break;
25548 }
25549 if (!atm_charge(vcc, sb->truesize)) {
25550 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25551 card->name);
25552- atomic_add(i - 1, &vcc->stats->rx_drop);
25553+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25554 dev_kfree_skb(sb);
25555 break;
25556 }
25557@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25558 ATM_SKB(sb)->vcc = vcc;
25559 __net_timestamp(sb);
25560 vcc->push(vcc, sb);
25561- atomic_inc(&vcc->stats->rx);
25562+ atomic_inc_unchecked(&vcc->stats->rx);
25563
25564 cell += ATM_CELL_PAYLOAD;
25565 }
25566@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25567 "(CDC: %08x)\n",
25568 card->name, len, rpp->len, readl(SAR_REG_CDC));
25569 recycle_rx_pool_skb(card, rpp);
25570- atomic_inc(&vcc->stats->rx_err);
25571+ atomic_inc_unchecked(&vcc->stats->rx_err);
25572 return;
25573 }
25574 if (stat & SAR_RSQE_CRC) {
25575 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25576 recycle_rx_pool_skb(card, rpp);
25577- atomic_inc(&vcc->stats->rx_err);
25578+ atomic_inc_unchecked(&vcc->stats->rx_err);
25579 return;
25580 }
25581 if (skb_queue_len(&rpp->queue) > 1) {
25582@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25583 RXPRINTK("%s: Can't alloc RX skb.\n",
25584 card->name);
25585 recycle_rx_pool_skb(card, rpp);
25586- atomic_inc(&vcc->stats->rx_err);
25587+ atomic_inc_unchecked(&vcc->stats->rx_err);
25588 return;
25589 }
25590 if (!atm_charge(vcc, skb->truesize)) {
25591@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25592 __net_timestamp(skb);
25593
25594 vcc->push(vcc, skb);
25595- atomic_inc(&vcc->stats->rx);
25596+ atomic_inc_unchecked(&vcc->stats->rx);
25597
25598 return;
25599 }
25600@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25601 __net_timestamp(skb);
25602
25603 vcc->push(vcc, skb);
25604- atomic_inc(&vcc->stats->rx);
25605+ atomic_inc_unchecked(&vcc->stats->rx);
25606
25607 if (skb->truesize > SAR_FB_SIZE_3)
25608 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25609@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25610 if (vcc->qos.aal != ATM_AAL0) {
25611 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25612 card->name, vpi, vci);
25613- atomic_inc(&vcc->stats->rx_drop);
25614+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25615 goto drop;
25616 }
25617
25618 if ((sb = dev_alloc_skb(64)) == NULL) {
25619 printk("%s: Can't allocate buffers for AAL0.\n",
25620 card->name);
25621- atomic_inc(&vcc->stats->rx_err);
25622+ atomic_inc_unchecked(&vcc->stats->rx_err);
25623 goto drop;
25624 }
25625
25626@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25627 ATM_SKB(sb)->vcc = vcc;
25628 __net_timestamp(sb);
25629 vcc->push(vcc, sb);
25630- atomic_inc(&vcc->stats->rx);
25631+ atomic_inc_unchecked(&vcc->stats->rx);
25632
25633 drop:
25634 skb_pull(queue, 64);
25635@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25636
25637 if (vc == NULL) {
25638 printk("%s: NULL connection in send().\n", card->name);
25639- atomic_inc(&vcc->stats->tx_err);
25640+ atomic_inc_unchecked(&vcc->stats->tx_err);
25641 dev_kfree_skb(skb);
25642 return -EINVAL;
25643 }
25644 if (!test_bit(VCF_TX, &vc->flags)) {
25645 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25646- atomic_inc(&vcc->stats->tx_err);
25647+ atomic_inc_unchecked(&vcc->stats->tx_err);
25648 dev_kfree_skb(skb);
25649 return -EINVAL;
25650 }
25651@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25652 break;
25653 default:
25654 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25655- atomic_inc(&vcc->stats->tx_err);
25656+ atomic_inc_unchecked(&vcc->stats->tx_err);
25657 dev_kfree_skb(skb);
25658 return -EINVAL;
25659 }
25660
25661 if (skb_shinfo(skb)->nr_frags != 0) {
25662 printk("%s: No scatter-gather yet.\n", card->name);
25663- atomic_inc(&vcc->stats->tx_err);
25664+ atomic_inc_unchecked(&vcc->stats->tx_err);
25665 dev_kfree_skb(skb);
25666 return -EINVAL;
25667 }
25668@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25669
25670 err = queue_skb(card, vc, skb, oam);
25671 if (err) {
25672- atomic_inc(&vcc->stats->tx_err);
25673+ atomic_inc_unchecked(&vcc->stats->tx_err);
25674 dev_kfree_skb(skb);
25675 return err;
25676 }
25677@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25678 skb = dev_alloc_skb(64);
25679 if (!skb) {
25680 printk("%s: Out of memory in send_oam().\n", card->name);
25681- atomic_inc(&vcc->stats->tx_err);
25682+ atomic_inc_unchecked(&vcc->stats->tx_err);
25683 return -ENOMEM;
25684 }
25685 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25686diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25687index 3d0c2b0..45441fa 100644
25688--- a/drivers/atm/iphase.c
25689+++ b/drivers/atm/iphase.c
25690@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25691 status = (u_short) (buf_desc_ptr->desc_mode);
25692 if (status & (RX_CER | RX_PTE | RX_OFL))
25693 {
25694- atomic_inc(&vcc->stats->rx_err);
25695+ atomic_inc_unchecked(&vcc->stats->rx_err);
25696 IF_ERR(printk("IA: bad packet, dropping it");)
25697 if (status & RX_CER) {
25698 IF_ERR(printk(" cause: packet CRC error\n");)
25699@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25700 len = dma_addr - buf_addr;
25701 if (len > iadev->rx_buf_sz) {
25702 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25703- atomic_inc(&vcc->stats->rx_err);
25704+ atomic_inc_unchecked(&vcc->stats->rx_err);
25705 goto out_free_desc;
25706 }
25707
25708@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25709 ia_vcc = INPH_IA_VCC(vcc);
25710 if (ia_vcc == NULL)
25711 {
25712- atomic_inc(&vcc->stats->rx_err);
25713+ atomic_inc_unchecked(&vcc->stats->rx_err);
25714 dev_kfree_skb_any(skb);
25715 atm_return(vcc, atm_guess_pdu2truesize(len));
25716 goto INCR_DLE;
25717@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25718 if ((length > iadev->rx_buf_sz) || (length >
25719 (skb->len - sizeof(struct cpcs_trailer))))
25720 {
25721- atomic_inc(&vcc->stats->rx_err);
25722+ atomic_inc_unchecked(&vcc->stats->rx_err);
25723 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25724 length, skb->len);)
25725 dev_kfree_skb_any(skb);
25726@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25727
25728 IF_RX(printk("rx_dle_intr: skb push");)
25729 vcc->push(vcc,skb);
25730- atomic_inc(&vcc->stats->rx);
25731+ atomic_inc_unchecked(&vcc->stats->rx);
25732 iadev->rx_pkt_cnt++;
25733 }
25734 INCR_DLE:
25735@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25736 {
25737 struct k_sonet_stats *stats;
25738 stats = &PRIV(_ia_dev[board])->sonet_stats;
25739- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25740- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25741- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25742- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25743- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25744- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25745- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25746- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25747- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25748+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25749+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25750+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25751+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25752+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25753+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25754+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25755+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25756+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25757 }
25758 ia_cmds.status = 0;
25759 break;
25760@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25761 if ((desc == 0) || (desc > iadev->num_tx_desc))
25762 {
25763 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25764- atomic_inc(&vcc->stats->tx);
25765+ atomic_inc_unchecked(&vcc->stats->tx);
25766 if (vcc->pop)
25767 vcc->pop(vcc, skb);
25768 else
25769@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25770 ATM_DESC(skb) = vcc->vci;
25771 skb_queue_tail(&iadev->tx_dma_q, skb);
25772
25773- atomic_inc(&vcc->stats->tx);
25774+ atomic_inc_unchecked(&vcc->stats->tx);
25775 iadev->tx_pkt_cnt++;
25776 /* Increment transaction counter */
25777 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25778
25779 #if 0
25780 /* add flow control logic */
25781- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25782+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25783 if (iavcc->vc_desc_cnt > 10) {
25784 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25785 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25786diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25787index f556969..0da15eb 100644
25788--- a/drivers/atm/lanai.c
25789+++ b/drivers/atm/lanai.c
25790@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25791 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25792 lanai_endtx(lanai, lvcc);
25793 lanai_free_skb(lvcc->tx.atmvcc, skb);
25794- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25795+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25796 }
25797
25798 /* Try to fill the buffer - don't call unless there is backlog */
25799@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25800 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25801 __net_timestamp(skb);
25802 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25803- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25804+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25805 out:
25806 lvcc->rx.buf.ptr = end;
25807 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25808@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25809 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25810 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25811 lanai->stats.service_rxnotaal5++;
25812- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25813+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25814 return 0;
25815 }
25816 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25817@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25818 int bytes;
25819 read_unlock(&vcc_sklist_lock);
25820 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25821- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25822+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25823 lvcc->stats.x.aal5.service_trash++;
25824 bytes = (SERVICE_GET_END(s) * 16) -
25825 (((unsigned long) lvcc->rx.buf.ptr) -
25826@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25827 }
25828 if (s & SERVICE_STREAM) {
25829 read_unlock(&vcc_sklist_lock);
25830- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25831+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25832 lvcc->stats.x.aal5.service_stream++;
25833 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25834 "PDU on VCI %d!\n", lanai->number, vci);
25835@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25836 return 0;
25837 }
25838 DPRINTK("got rx crc error on vci %d\n", vci);
25839- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25840+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25841 lvcc->stats.x.aal5.service_rxcrc++;
25842 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25843 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25844diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25845index 1c70c45..300718d 100644
25846--- a/drivers/atm/nicstar.c
25847+++ b/drivers/atm/nicstar.c
25848@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25849 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25850 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25851 card->index);
25852- atomic_inc(&vcc->stats->tx_err);
25853+ atomic_inc_unchecked(&vcc->stats->tx_err);
25854 dev_kfree_skb_any(skb);
25855 return -EINVAL;
25856 }
25857@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25858 if (!vc->tx) {
25859 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25860 card->index);
25861- atomic_inc(&vcc->stats->tx_err);
25862+ atomic_inc_unchecked(&vcc->stats->tx_err);
25863 dev_kfree_skb_any(skb);
25864 return -EINVAL;
25865 }
25866@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25867 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25868 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25869 card->index);
25870- atomic_inc(&vcc->stats->tx_err);
25871+ atomic_inc_unchecked(&vcc->stats->tx_err);
25872 dev_kfree_skb_any(skb);
25873 return -EINVAL;
25874 }
25875
25876 if (skb_shinfo(skb)->nr_frags != 0) {
25877 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25878- atomic_inc(&vcc->stats->tx_err);
25879+ atomic_inc_unchecked(&vcc->stats->tx_err);
25880 dev_kfree_skb_any(skb);
25881 return -EINVAL;
25882 }
25883@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25884 }
25885
25886 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25887- atomic_inc(&vcc->stats->tx_err);
25888+ atomic_inc_unchecked(&vcc->stats->tx_err);
25889 dev_kfree_skb_any(skb);
25890 return -EIO;
25891 }
25892- atomic_inc(&vcc->stats->tx);
25893+ atomic_inc_unchecked(&vcc->stats->tx);
25894
25895 return 0;
25896 }
25897@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25898 printk
25899 ("nicstar%d: Can't allocate buffers for aal0.\n",
25900 card->index);
25901- atomic_add(i, &vcc->stats->rx_drop);
25902+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25903 break;
25904 }
25905 if (!atm_charge(vcc, sb->truesize)) {
25906 RXPRINTK
25907 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25908 card->index);
25909- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25910+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25911 dev_kfree_skb_any(sb);
25912 break;
25913 }
25914@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25915 ATM_SKB(sb)->vcc = vcc;
25916 __net_timestamp(sb);
25917 vcc->push(vcc, sb);
25918- atomic_inc(&vcc->stats->rx);
25919+ atomic_inc_unchecked(&vcc->stats->rx);
25920 cell += ATM_CELL_PAYLOAD;
25921 }
25922
25923@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25924 if (iovb == NULL) {
25925 printk("nicstar%d: Out of iovec buffers.\n",
25926 card->index);
25927- atomic_inc(&vcc->stats->rx_drop);
25928+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25929 recycle_rx_buf(card, skb);
25930 return;
25931 }
25932@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25933 small or large buffer itself. */
25934 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25935 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25936- atomic_inc(&vcc->stats->rx_err);
25937+ atomic_inc_unchecked(&vcc->stats->rx_err);
25938 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25939 NS_MAX_IOVECS);
25940 NS_PRV_IOVCNT(iovb) = 0;
25941@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25942 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25943 card->index);
25944 which_list(card, skb);
25945- atomic_inc(&vcc->stats->rx_err);
25946+ atomic_inc_unchecked(&vcc->stats->rx_err);
25947 recycle_rx_buf(card, skb);
25948 vc->rx_iov = NULL;
25949 recycle_iov_buf(card, iovb);
25950@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25951 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25952 card->index);
25953 which_list(card, skb);
25954- atomic_inc(&vcc->stats->rx_err);
25955+ atomic_inc_unchecked(&vcc->stats->rx_err);
25956 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25957 NS_PRV_IOVCNT(iovb));
25958 vc->rx_iov = NULL;
25959@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25960 printk(" - PDU size mismatch.\n");
25961 else
25962 printk(".\n");
25963- atomic_inc(&vcc->stats->rx_err);
25964+ atomic_inc_unchecked(&vcc->stats->rx_err);
25965 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25966 NS_PRV_IOVCNT(iovb));
25967 vc->rx_iov = NULL;
25968@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25969 /* skb points to a small buffer */
25970 if (!atm_charge(vcc, skb->truesize)) {
25971 push_rxbufs(card, skb);
25972- atomic_inc(&vcc->stats->rx_drop);
25973+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25974 } else {
25975 skb_put(skb, len);
25976 dequeue_sm_buf(card, skb);
25977@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25978 ATM_SKB(skb)->vcc = vcc;
25979 __net_timestamp(skb);
25980 vcc->push(vcc, skb);
25981- atomic_inc(&vcc->stats->rx);
25982+ atomic_inc_unchecked(&vcc->stats->rx);
25983 }
25984 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
25985 struct sk_buff *sb;
25986@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25987 if (len <= NS_SMBUFSIZE) {
25988 if (!atm_charge(vcc, sb->truesize)) {
25989 push_rxbufs(card, sb);
25990- atomic_inc(&vcc->stats->rx_drop);
25991+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25992 } else {
25993 skb_put(sb, len);
25994 dequeue_sm_buf(card, sb);
25995@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25996 ATM_SKB(sb)->vcc = vcc;
25997 __net_timestamp(sb);
25998 vcc->push(vcc, sb);
25999- atomic_inc(&vcc->stats->rx);
26000+ atomic_inc_unchecked(&vcc->stats->rx);
26001 }
26002
26003 push_rxbufs(card, skb);
26004@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26005
26006 if (!atm_charge(vcc, skb->truesize)) {
26007 push_rxbufs(card, skb);
26008- atomic_inc(&vcc->stats->rx_drop);
26009+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26010 } else {
26011 dequeue_lg_buf(card, skb);
26012 #ifdef NS_USE_DESTRUCTORS
26013@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26014 ATM_SKB(skb)->vcc = vcc;
26015 __net_timestamp(skb);
26016 vcc->push(vcc, skb);
26017- atomic_inc(&vcc->stats->rx);
26018+ atomic_inc_unchecked(&vcc->stats->rx);
26019 }
26020
26021 push_rxbufs(card, sb);
26022@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26023 printk
26024 ("nicstar%d: Out of huge buffers.\n",
26025 card->index);
26026- atomic_inc(&vcc->stats->rx_drop);
26027+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26028 recycle_iovec_rx_bufs(card,
26029 (struct iovec *)
26030 iovb->data,
26031@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26032 card->hbpool.count++;
26033 } else
26034 dev_kfree_skb_any(hb);
26035- atomic_inc(&vcc->stats->rx_drop);
26036+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26037 } else {
26038 /* Copy the small buffer to the huge buffer */
26039 sb = (struct sk_buff *)iov->iov_base;
26040@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26041 #endif /* NS_USE_DESTRUCTORS */
26042 __net_timestamp(hb);
26043 vcc->push(vcc, hb);
26044- atomic_inc(&vcc->stats->rx);
26045+ atomic_inc_unchecked(&vcc->stats->rx);
26046 }
26047 }
26048
26049diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26050index 5d1d076..12fbca4 100644
26051--- a/drivers/atm/solos-pci.c
26052+++ b/drivers/atm/solos-pci.c
26053@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26054 }
26055 atm_charge(vcc, skb->truesize);
26056 vcc->push(vcc, skb);
26057- atomic_inc(&vcc->stats->rx);
26058+ atomic_inc_unchecked(&vcc->stats->rx);
26059 break;
26060
26061 case PKT_STATUS:
26062@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26063 vcc = SKB_CB(oldskb)->vcc;
26064
26065 if (vcc) {
26066- atomic_inc(&vcc->stats->tx);
26067+ atomic_inc_unchecked(&vcc->stats->tx);
26068 solos_pop(vcc, oldskb);
26069 } else
26070 dev_kfree_skb_irq(oldskb);
26071diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26072index 90f1ccc..04c4a1e 100644
26073--- a/drivers/atm/suni.c
26074+++ b/drivers/atm/suni.c
26075@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26076
26077
26078 #define ADD_LIMITED(s,v) \
26079- atomic_add((v),&stats->s); \
26080- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26081+ atomic_add_unchecked((v),&stats->s); \
26082+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26083
26084
26085 static void suni_hz(unsigned long from_timer)
26086diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26087index 5120a96..e2572bd 100644
26088--- a/drivers/atm/uPD98402.c
26089+++ b/drivers/atm/uPD98402.c
26090@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26091 struct sonet_stats tmp;
26092 int error = 0;
26093
26094- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26095+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26096 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26097 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26098 if (zero && !error) {
26099@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26100
26101
26102 #define ADD_LIMITED(s,v) \
26103- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26104- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26105- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26106+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26107+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26108+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26109
26110
26111 static void stat_event(struct atm_dev *dev)
26112@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26113 if (reason & uPD98402_INT_PFM) stat_event(dev);
26114 if (reason & uPD98402_INT_PCO) {
26115 (void) GET(PCOCR); /* clear interrupt cause */
26116- atomic_add(GET(HECCT),
26117+ atomic_add_unchecked(GET(HECCT),
26118 &PRIV(dev)->sonet_stats.uncorr_hcs);
26119 }
26120 if ((reason & uPD98402_INT_RFO) &&
26121@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26122 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26123 uPD98402_INT_LOS),PIMR); /* enable them */
26124 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26125- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26126- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26127- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26128+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26129+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26130+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26131 return 0;
26132 }
26133
26134diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26135index d889f56..17eb71e 100644
26136--- a/drivers/atm/zatm.c
26137+++ b/drivers/atm/zatm.c
26138@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26139 }
26140 if (!size) {
26141 dev_kfree_skb_irq(skb);
26142- if (vcc) atomic_inc(&vcc->stats->rx_err);
26143+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26144 continue;
26145 }
26146 if (!atm_charge(vcc,skb->truesize)) {
26147@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26148 skb->len = size;
26149 ATM_SKB(skb)->vcc = vcc;
26150 vcc->push(vcc,skb);
26151- atomic_inc(&vcc->stats->rx);
26152+ atomic_inc_unchecked(&vcc->stats->rx);
26153 }
26154 zout(pos & 0xffff,MTA(mbx));
26155 #if 0 /* probably a stupid idea */
26156@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26157 skb_queue_head(&zatm_vcc->backlog,skb);
26158 break;
26159 }
26160- atomic_inc(&vcc->stats->tx);
26161+ atomic_inc_unchecked(&vcc->stats->tx);
26162 wake_up(&zatm_vcc->tx_wait);
26163 }
26164
26165diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26166index a4760e0..51283cf 100644
26167--- a/drivers/base/devtmpfs.c
26168+++ b/drivers/base/devtmpfs.c
26169@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26170 if (!thread)
26171 return 0;
26172
26173- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26174+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26175 if (err)
26176 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26177 else
26178diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26179index caf995f..6f76697 100644
26180--- a/drivers/base/power/wakeup.c
26181+++ b/drivers/base/power/wakeup.c
26182@@ -30,14 +30,14 @@ bool events_check_enabled;
26183 * They need to be modified together atomically, so it's better to use one
26184 * atomic variable to hold them both.
26185 */
26186-static atomic_t combined_event_count = ATOMIC_INIT(0);
26187+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26188
26189 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26190 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26191
26192 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26193 {
26194- unsigned int comb = atomic_read(&combined_event_count);
26195+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26196
26197 *cnt = (comb >> IN_PROGRESS_BITS);
26198 *inpr = comb & MAX_IN_PROGRESS;
26199@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26200 ws->last_time = ktime_get();
26201
26202 /* Increment the counter of events in progress. */
26203- atomic_inc(&combined_event_count);
26204+ atomic_inc_unchecked(&combined_event_count);
26205 }
26206
26207 /**
26208@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26209 * Increment the counter of registered wakeup events and decrement the
26210 * couter of wakeup events in progress simultaneously.
26211 */
26212- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26213+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26214 }
26215
26216 /**
26217diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26218index b0f553b..77b928b 100644
26219--- a/drivers/block/cciss.c
26220+++ b/drivers/block/cciss.c
26221@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26222 int err;
26223 u32 cp;
26224
26225+ memset(&arg64, 0, sizeof(arg64));
26226+
26227 err = 0;
26228 err |=
26229 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26230@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26231 while (!list_empty(&h->reqQ)) {
26232 c = list_entry(h->reqQ.next, CommandList_struct, list);
26233 /* can't do anything if fifo is full */
26234- if ((h->access.fifo_full(h))) {
26235+ if ((h->access->fifo_full(h))) {
26236 dev_warn(&h->pdev->dev, "fifo full\n");
26237 break;
26238 }
26239@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26240 h->Qdepth--;
26241
26242 /* Tell the controller execute command */
26243- h->access.submit_command(h, c);
26244+ h->access->submit_command(h, c);
26245
26246 /* Put job onto the completed Q */
26247 addQ(&h->cmpQ, c);
26248@@ -3443,17 +3445,17 @@ startio:
26249
26250 static inline unsigned long get_next_completion(ctlr_info_t *h)
26251 {
26252- return h->access.command_completed(h);
26253+ return h->access->command_completed(h);
26254 }
26255
26256 static inline int interrupt_pending(ctlr_info_t *h)
26257 {
26258- return h->access.intr_pending(h);
26259+ return h->access->intr_pending(h);
26260 }
26261
26262 static inline long interrupt_not_for_us(ctlr_info_t *h)
26263 {
26264- return ((h->access.intr_pending(h) == 0) ||
26265+ return ((h->access->intr_pending(h) == 0) ||
26266 (h->interrupts_enabled == 0));
26267 }
26268
26269@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26270 u32 a;
26271
26272 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26273- return h->access.command_completed(h);
26274+ return h->access->command_completed(h);
26275
26276 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26277 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26278@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26279 trans_support & CFGTBL_Trans_use_short_tags);
26280
26281 /* Change the access methods to the performant access methods */
26282- h->access = SA5_performant_access;
26283+ h->access = &SA5_performant_access;
26284 h->transMethod = CFGTBL_Trans_Performant;
26285
26286 return;
26287@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26288 if (prod_index < 0)
26289 return -ENODEV;
26290 h->product_name = products[prod_index].product_name;
26291- h->access = *(products[prod_index].access);
26292+ h->access = products[prod_index].access;
26293
26294 if (cciss_board_disabled(h)) {
26295 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26296@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26297 }
26298
26299 /* make sure the board interrupts are off */
26300- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26301+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26302 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26303 if (rc)
26304 goto clean2;
26305@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26306 * fake ones to scoop up any residual completions.
26307 */
26308 spin_lock_irqsave(&h->lock, flags);
26309- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26310+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26311 spin_unlock_irqrestore(&h->lock, flags);
26312 free_irq(h->intr[h->intr_mode], h);
26313 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26314@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26315 dev_info(&h->pdev->dev, "Board READY.\n");
26316 dev_info(&h->pdev->dev,
26317 "Waiting for stale completions to drain.\n");
26318- h->access.set_intr_mask(h, CCISS_INTR_ON);
26319+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26320 msleep(10000);
26321- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26322+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26323
26324 rc = controller_reset_failed(h->cfgtable);
26325 if (rc)
26326@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26327 cciss_scsi_setup(h);
26328
26329 /* Turn the interrupts on so we can service requests */
26330- h->access.set_intr_mask(h, CCISS_INTR_ON);
26331+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26332
26333 /* Get the firmware version */
26334 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26335@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26336 kfree(flush_buf);
26337 if (return_code != IO_OK)
26338 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26339- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26340+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26341 free_irq(h->intr[h->intr_mode], h);
26342 }
26343
26344diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26345index 7fda30e..eb5dfe0 100644
26346--- a/drivers/block/cciss.h
26347+++ b/drivers/block/cciss.h
26348@@ -101,7 +101,7 @@ struct ctlr_info
26349 /* information about each logical volume */
26350 drive_info_struct *drv[CISS_MAX_LUN];
26351
26352- struct access_method access;
26353+ struct access_method *access;
26354
26355 /* queue and queue Info */
26356 struct list_head reqQ;
26357diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26358index 9125bbe..eede5c8 100644
26359--- a/drivers/block/cpqarray.c
26360+++ b/drivers/block/cpqarray.c
26361@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26362 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26363 goto Enomem4;
26364 }
26365- hba[i]->access.set_intr_mask(hba[i], 0);
26366+ hba[i]->access->set_intr_mask(hba[i], 0);
26367 if (request_irq(hba[i]->intr, do_ida_intr,
26368 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26369 {
26370@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26371 add_timer(&hba[i]->timer);
26372
26373 /* Enable IRQ now that spinlock and rate limit timer are set up */
26374- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26375+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26376
26377 for(j=0; j<NWD; j++) {
26378 struct gendisk *disk = ida_gendisk[i][j];
26379@@ -694,7 +694,7 @@ DBGINFO(
26380 for(i=0; i<NR_PRODUCTS; i++) {
26381 if (board_id == products[i].board_id) {
26382 c->product_name = products[i].product_name;
26383- c->access = *(products[i].access);
26384+ c->access = products[i].access;
26385 break;
26386 }
26387 }
26388@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26389 hba[ctlr]->intr = intr;
26390 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26391 hba[ctlr]->product_name = products[j].product_name;
26392- hba[ctlr]->access = *(products[j].access);
26393+ hba[ctlr]->access = products[j].access;
26394 hba[ctlr]->ctlr = ctlr;
26395 hba[ctlr]->board_id = board_id;
26396 hba[ctlr]->pci_dev = NULL; /* not PCI */
26397@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26398
26399 while((c = h->reqQ) != NULL) {
26400 /* Can't do anything if we're busy */
26401- if (h->access.fifo_full(h) == 0)
26402+ if (h->access->fifo_full(h) == 0)
26403 return;
26404
26405 /* Get the first entry from the request Q */
26406@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26407 h->Qdepth--;
26408
26409 /* Tell the controller to do our bidding */
26410- h->access.submit_command(h, c);
26411+ h->access->submit_command(h, c);
26412
26413 /* Get onto the completion Q */
26414 addQ(&h->cmpQ, c);
26415@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26416 unsigned long flags;
26417 __u32 a,a1;
26418
26419- istat = h->access.intr_pending(h);
26420+ istat = h->access->intr_pending(h);
26421 /* Is this interrupt for us? */
26422 if (istat == 0)
26423 return IRQ_NONE;
26424@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26425 */
26426 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26427 if (istat & FIFO_NOT_EMPTY) {
26428- while((a = h->access.command_completed(h))) {
26429+ while((a = h->access->command_completed(h))) {
26430 a1 = a; a &= ~3;
26431 if ((c = h->cmpQ) == NULL)
26432 {
26433@@ -1449,11 +1449,11 @@ static int sendcmd(
26434 /*
26435 * Disable interrupt
26436 */
26437- info_p->access.set_intr_mask(info_p, 0);
26438+ info_p->access->set_intr_mask(info_p, 0);
26439 /* Make sure there is room in the command FIFO */
26440 /* Actually it should be completely empty at this time. */
26441 for (i = 200000; i > 0; i--) {
26442- temp = info_p->access.fifo_full(info_p);
26443+ temp = info_p->access->fifo_full(info_p);
26444 if (temp != 0) {
26445 break;
26446 }
26447@@ -1466,7 +1466,7 @@ DBG(
26448 /*
26449 * Send the cmd
26450 */
26451- info_p->access.submit_command(info_p, c);
26452+ info_p->access->submit_command(info_p, c);
26453 complete = pollcomplete(ctlr);
26454
26455 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26456@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26457 * we check the new geometry. Then turn interrupts back on when
26458 * we're done.
26459 */
26460- host->access.set_intr_mask(host, 0);
26461+ host->access->set_intr_mask(host, 0);
26462 getgeometry(ctlr);
26463- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26464+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26465
26466 for(i=0; i<NWD; i++) {
26467 struct gendisk *disk = ida_gendisk[ctlr][i];
26468@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26469 /* Wait (up to 2 seconds) for a command to complete */
26470
26471 for (i = 200000; i > 0; i--) {
26472- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26473+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26474 if (done == 0) {
26475 udelay(10); /* a short fixed delay */
26476 } else
26477diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26478index be73e9d..7fbf140 100644
26479--- a/drivers/block/cpqarray.h
26480+++ b/drivers/block/cpqarray.h
26481@@ -99,7 +99,7 @@ struct ctlr_info {
26482 drv_info_t drv[NWD];
26483 struct proc_dir_entry *proc;
26484
26485- struct access_method access;
26486+ struct access_method *access;
26487
26488 cmdlist_t *reqQ;
26489 cmdlist_t *cmpQ;
26490diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26491index 9cf2035..bffca95 100644
26492--- a/drivers/block/drbd/drbd_int.h
26493+++ b/drivers/block/drbd/drbd_int.h
26494@@ -736,7 +736,7 @@ struct drbd_request;
26495 struct drbd_epoch {
26496 struct list_head list;
26497 unsigned int barrier_nr;
26498- atomic_t epoch_size; /* increased on every request added. */
26499+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26500 atomic_t active; /* increased on every req. added, and dec on every finished. */
26501 unsigned long flags;
26502 };
26503@@ -1108,7 +1108,7 @@ struct drbd_conf {
26504 void *int_dig_in;
26505 void *int_dig_vv;
26506 wait_queue_head_t seq_wait;
26507- atomic_t packet_seq;
26508+ atomic_unchecked_t packet_seq;
26509 unsigned int peer_seq;
26510 spinlock_t peer_seq_lock;
26511 unsigned int minor;
26512@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26513
26514 static inline void drbd_tcp_cork(struct socket *sock)
26515 {
26516- int __user val = 1;
26517+ int val = 1;
26518 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26519- (char __user *)&val, sizeof(val));
26520+ (char __force_user *)&val, sizeof(val));
26521 }
26522
26523 static inline void drbd_tcp_uncork(struct socket *sock)
26524 {
26525- int __user val = 0;
26526+ int val = 0;
26527 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26528- (char __user *)&val, sizeof(val));
26529+ (char __force_user *)&val, sizeof(val));
26530 }
26531
26532 static inline void drbd_tcp_nodelay(struct socket *sock)
26533 {
26534- int __user val = 1;
26535+ int val = 1;
26536 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26537- (char __user *)&val, sizeof(val));
26538+ (char __force_user *)&val, sizeof(val));
26539 }
26540
26541 static inline void drbd_tcp_quickack(struct socket *sock)
26542 {
26543- int __user val = 2;
26544+ int val = 2;
26545 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26546- (char __user *)&val, sizeof(val));
26547+ (char __force_user *)&val, sizeof(val));
26548 }
26549
26550 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26551diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26552index 0358e55..bc33689 100644
26553--- a/drivers/block/drbd/drbd_main.c
26554+++ b/drivers/block/drbd/drbd_main.c
26555@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26556 p.sector = sector;
26557 p.block_id = block_id;
26558 p.blksize = blksize;
26559- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26560+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26561
26562 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26563 return false;
26564@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26565 p.sector = cpu_to_be64(req->sector);
26566 p.block_id = (unsigned long)req;
26567 p.seq_num = cpu_to_be32(req->seq_num =
26568- atomic_add_return(1, &mdev->packet_seq));
26569+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26570
26571 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26572
26573@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26574 atomic_set(&mdev->unacked_cnt, 0);
26575 atomic_set(&mdev->local_cnt, 0);
26576 atomic_set(&mdev->net_cnt, 0);
26577- atomic_set(&mdev->packet_seq, 0);
26578+ atomic_set_unchecked(&mdev->packet_seq, 0);
26579 atomic_set(&mdev->pp_in_use, 0);
26580 atomic_set(&mdev->pp_in_use_by_net, 0);
26581 atomic_set(&mdev->rs_sect_in, 0);
26582@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26583 mdev->receiver.t_state);
26584
26585 /* no need to lock it, I'm the only thread alive */
26586- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26587- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26588+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26589+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26590 mdev->al_writ_cnt =
26591 mdev->bm_writ_cnt =
26592 mdev->read_cnt =
26593diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26594index af2a250..219c74b 100644
26595--- a/drivers/block/drbd/drbd_nl.c
26596+++ b/drivers/block/drbd/drbd_nl.c
26597@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26598 module_put(THIS_MODULE);
26599 }
26600
26601-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26602+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26603
26604 static unsigned short *
26605 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26606@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26607 cn_reply->id.idx = CN_IDX_DRBD;
26608 cn_reply->id.val = CN_VAL_DRBD;
26609
26610- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26611+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26612 cn_reply->ack = 0; /* not used here. */
26613 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26614 (int)((char *)tl - (char *)reply->tag_list);
26615@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26616 cn_reply->id.idx = CN_IDX_DRBD;
26617 cn_reply->id.val = CN_VAL_DRBD;
26618
26619- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26620+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26621 cn_reply->ack = 0; /* not used here. */
26622 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26623 (int)((char *)tl - (char *)reply->tag_list);
26624@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26625 cn_reply->id.idx = CN_IDX_DRBD;
26626 cn_reply->id.val = CN_VAL_DRBD;
26627
26628- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26629+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26630 cn_reply->ack = 0; // not used here.
26631 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26632 (int)((char*)tl - (char*)reply->tag_list);
26633@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26634 cn_reply->id.idx = CN_IDX_DRBD;
26635 cn_reply->id.val = CN_VAL_DRBD;
26636
26637- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26638+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26639 cn_reply->ack = 0; /* not used here. */
26640 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26641 (int)((char *)tl - (char *)reply->tag_list);
26642diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26643index 43beaca..4a5b1dd 100644
26644--- a/drivers/block/drbd/drbd_receiver.c
26645+++ b/drivers/block/drbd/drbd_receiver.c
26646@@ -894,7 +894,7 @@ retry:
26647 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26648 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26649
26650- atomic_set(&mdev->packet_seq, 0);
26651+ atomic_set_unchecked(&mdev->packet_seq, 0);
26652 mdev->peer_seq = 0;
26653
26654 drbd_thread_start(&mdev->asender);
26655@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26656 do {
26657 next_epoch = NULL;
26658
26659- epoch_size = atomic_read(&epoch->epoch_size);
26660+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26661
26662 switch (ev & ~EV_CLEANUP) {
26663 case EV_PUT:
26664@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26665 rv = FE_DESTROYED;
26666 } else {
26667 epoch->flags = 0;
26668- atomic_set(&epoch->epoch_size, 0);
26669+ atomic_set_unchecked(&epoch->epoch_size, 0);
26670 /* atomic_set(&epoch->active, 0); is already zero */
26671 if (rv == FE_STILL_LIVE)
26672 rv = FE_RECYCLED;
26673@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26674 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26675 drbd_flush(mdev);
26676
26677- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26678+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26679 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26680 if (epoch)
26681 break;
26682 }
26683
26684 epoch = mdev->current_epoch;
26685- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26686+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26687
26688 D_ASSERT(atomic_read(&epoch->active) == 0);
26689 D_ASSERT(epoch->flags == 0);
26690@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26691 }
26692
26693 epoch->flags = 0;
26694- atomic_set(&epoch->epoch_size, 0);
26695+ atomic_set_unchecked(&epoch->epoch_size, 0);
26696 atomic_set(&epoch->active, 0);
26697
26698 spin_lock(&mdev->epoch_lock);
26699- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26700+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26701 list_add(&epoch->list, &mdev->current_epoch->list);
26702 mdev->current_epoch = epoch;
26703 mdev->epochs++;
26704@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26705 spin_unlock(&mdev->peer_seq_lock);
26706
26707 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26708- atomic_inc(&mdev->current_epoch->epoch_size);
26709+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26710 return drbd_drain_block(mdev, data_size);
26711 }
26712
26713@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26714
26715 spin_lock(&mdev->epoch_lock);
26716 e->epoch = mdev->current_epoch;
26717- atomic_inc(&e->epoch->epoch_size);
26718+ atomic_inc_unchecked(&e->epoch->epoch_size);
26719 atomic_inc(&e->epoch->active);
26720 spin_unlock(&mdev->epoch_lock);
26721
26722@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26723 D_ASSERT(list_empty(&mdev->done_ee));
26724
26725 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26726- atomic_set(&mdev->current_epoch->epoch_size, 0);
26727+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26728 D_ASSERT(list_empty(&mdev->current_epoch->list));
26729 }
26730
26731diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26732index 1e888c9..05cf1b0 100644
26733--- a/drivers/block/loop.c
26734+++ b/drivers/block/loop.c
26735@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26736 mm_segment_t old_fs = get_fs();
26737
26738 set_fs(get_ds());
26739- bw = file->f_op->write(file, buf, len, &pos);
26740+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26741 set_fs(old_fs);
26742 if (likely(bw == len))
26743 return 0;
26744diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26745index 4364303..9adf4ee 100644
26746--- a/drivers/char/Kconfig
26747+++ b/drivers/char/Kconfig
26748@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26749
26750 config DEVKMEM
26751 bool "/dev/kmem virtual device support"
26752- default y
26753+ default n
26754+ depends on !GRKERNSEC_KMEM
26755 help
26756 Say Y here if you want to support the /dev/kmem device. The
26757 /dev/kmem device is rarely used, but can be used for certain
26758@@ -596,6 +597,7 @@ config DEVPORT
26759 bool
26760 depends on !M68K
26761 depends on ISA || PCI
26762+ depends on !GRKERNSEC_KMEM
26763 default y
26764
26765 source "drivers/s390/char/Kconfig"
26766diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26767index 2e04433..22afc64 100644
26768--- a/drivers/char/agp/frontend.c
26769+++ b/drivers/char/agp/frontend.c
26770@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26771 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26772 return -EFAULT;
26773
26774- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26775+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26776 return -EFAULT;
26777
26778 client = agp_find_client_by_pid(reserve.pid);
26779diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26780index 095ab90..afad0a4 100644
26781--- a/drivers/char/briq_panel.c
26782+++ b/drivers/char/briq_panel.c
26783@@ -9,6 +9,7 @@
26784 #include <linux/types.h>
26785 #include <linux/errno.h>
26786 #include <linux/tty.h>
26787+#include <linux/mutex.h>
26788 #include <linux/timer.h>
26789 #include <linux/kernel.h>
26790 #include <linux/wait.h>
26791@@ -34,6 +35,7 @@ static int vfd_is_open;
26792 static unsigned char vfd[40];
26793 static int vfd_cursor;
26794 static unsigned char ledpb, led;
26795+static DEFINE_MUTEX(vfd_mutex);
26796
26797 static void update_vfd(void)
26798 {
26799@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26800 if (!vfd_is_open)
26801 return -EBUSY;
26802
26803+ mutex_lock(&vfd_mutex);
26804 for (;;) {
26805 char c;
26806 if (!indx)
26807 break;
26808- if (get_user(c, buf))
26809+ if (get_user(c, buf)) {
26810+ mutex_unlock(&vfd_mutex);
26811 return -EFAULT;
26812+ }
26813 if (esc) {
26814 set_led(c);
26815 esc = 0;
26816@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26817 buf++;
26818 }
26819 update_vfd();
26820+ mutex_unlock(&vfd_mutex);
26821
26822 return len;
26823 }
26824diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26825index f773a9d..65cd683 100644
26826--- a/drivers/char/genrtc.c
26827+++ b/drivers/char/genrtc.c
26828@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26829 switch (cmd) {
26830
26831 case RTC_PLL_GET:
26832+ memset(&pll, 0, sizeof(pll));
26833 if (get_rtc_pll(&pll))
26834 return -EINVAL;
26835 else
26836diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26837index 0833896..cccce52 100644
26838--- a/drivers/char/hpet.c
26839+++ b/drivers/char/hpet.c
26840@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26841 }
26842
26843 static int
26844-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26845+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26846 struct hpet_info *info)
26847 {
26848 struct hpet_timer __iomem *timer;
26849diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26850index 58c0e63..46c16bf 100644
26851--- a/drivers/char/ipmi/ipmi_msghandler.c
26852+++ b/drivers/char/ipmi/ipmi_msghandler.c
26853@@ -415,7 +415,7 @@ struct ipmi_smi {
26854 struct proc_dir_entry *proc_dir;
26855 char proc_dir_name[10];
26856
26857- atomic_t stats[IPMI_NUM_STATS];
26858+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26859
26860 /*
26861 * run_to_completion duplicate of smb_info, smi_info
26862@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26863
26864
26865 #define ipmi_inc_stat(intf, stat) \
26866- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26867+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26868 #define ipmi_get_stat(intf, stat) \
26869- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26870+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26871
26872 static int is_lan_addr(struct ipmi_addr *addr)
26873 {
26874@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26875 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26876 init_waitqueue_head(&intf->waitq);
26877 for (i = 0; i < IPMI_NUM_STATS; i++)
26878- atomic_set(&intf->stats[i], 0);
26879+ atomic_set_unchecked(&intf->stats[i], 0);
26880
26881 intf->proc_dir = NULL;
26882
26883diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26884index 9397ab4..d01bee1 100644
26885--- a/drivers/char/ipmi/ipmi_si_intf.c
26886+++ b/drivers/char/ipmi/ipmi_si_intf.c
26887@@ -277,7 +277,7 @@ struct smi_info {
26888 unsigned char slave_addr;
26889
26890 /* Counters and things for the proc filesystem. */
26891- atomic_t stats[SI_NUM_STATS];
26892+ atomic_unchecked_t stats[SI_NUM_STATS];
26893
26894 struct task_struct *thread;
26895
26896@@ -286,9 +286,9 @@ struct smi_info {
26897 };
26898
26899 #define smi_inc_stat(smi, stat) \
26900- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26901+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26902 #define smi_get_stat(smi, stat) \
26903- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26904+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26905
26906 #define SI_MAX_PARMS 4
26907
26908@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26909 atomic_set(&new_smi->req_events, 0);
26910 new_smi->run_to_completion = 0;
26911 for (i = 0; i < SI_NUM_STATS; i++)
26912- atomic_set(&new_smi->stats[i], 0);
26913+ atomic_set_unchecked(&new_smi->stats[i], 0);
26914
26915 new_smi->interrupt_disabled = 1;
26916 atomic_set(&new_smi->stop_operation, 0);
26917diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26918index 1aeaaba..e018570 100644
26919--- a/drivers/char/mbcs.c
26920+++ b/drivers/char/mbcs.c
26921@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26922 return 0;
26923 }
26924
26925-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26926+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26927 {
26928 .part_num = MBCS_PART_NUM,
26929 .mfg_num = MBCS_MFG_NUM,
26930diff --git a/drivers/char/mem.c b/drivers/char/mem.c
26931index 1451790..f705c30 100644
26932--- a/drivers/char/mem.c
26933+++ b/drivers/char/mem.c
26934@@ -18,6 +18,7 @@
26935 #include <linux/raw.h>
26936 #include <linux/tty.h>
26937 #include <linux/capability.h>
26938+#include <linux/security.h>
26939 #include <linux/ptrace.h>
26940 #include <linux/device.h>
26941 #include <linux/highmem.h>
26942@@ -35,6 +36,10 @@
26943 # include <linux/efi.h>
26944 #endif
26945
26946+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26947+extern const struct file_operations grsec_fops;
26948+#endif
26949+
26950 static inline unsigned long size_inside_page(unsigned long start,
26951 unsigned long size)
26952 {
26953@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26954
26955 while (cursor < to) {
26956 if (!devmem_is_allowed(pfn)) {
26957+#ifdef CONFIG_GRKERNSEC_KMEM
26958+ gr_handle_mem_readwrite(from, to);
26959+#else
26960 printk(KERN_INFO
26961 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26962 current->comm, from, to);
26963+#endif
26964 return 0;
26965 }
26966 cursor += PAGE_SIZE;
26967@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26968 }
26969 return 1;
26970 }
26971+#elif defined(CONFIG_GRKERNSEC_KMEM)
26972+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26973+{
26974+ return 0;
26975+}
26976 #else
26977 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26978 {
26979@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
26980
26981 while (count > 0) {
26982 unsigned long remaining;
26983+ char *temp;
26984
26985 sz = size_inside_page(p, count);
26986
26987@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
26988 if (!ptr)
26989 return -EFAULT;
26990
26991- remaining = copy_to_user(buf, ptr, sz);
26992+#ifdef CONFIG_PAX_USERCOPY
26993+ temp = kmalloc(sz, GFP_KERNEL);
26994+ if (!temp) {
26995+ unxlate_dev_mem_ptr(p, ptr);
26996+ return -ENOMEM;
26997+ }
26998+ memcpy(temp, ptr, sz);
26999+#else
27000+ temp = ptr;
27001+#endif
27002+
27003+ remaining = copy_to_user(buf, temp, sz);
27004+
27005+#ifdef CONFIG_PAX_USERCOPY
27006+ kfree(temp);
27007+#endif
27008+
27009 unxlate_dev_mem_ptr(p, ptr);
27010 if (remaining)
27011 return -EFAULT;
27012@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27013 size_t count, loff_t *ppos)
27014 {
27015 unsigned long p = *ppos;
27016- ssize_t low_count, read, sz;
27017+ ssize_t low_count, read, sz, err = 0;
27018 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27019- int err = 0;
27020
27021 read = 0;
27022 if (p < (unsigned long) high_memory) {
27023@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27024 }
27025 #endif
27026 while (low_count > 0) {
27027+ char *temp;
27028+
27029 sz = size_inside_page(p, low_count);
27030
27031 /*
27032@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27033 */
27034 kbuf = xlate_dev_kmem_ptr((char *)p);
27035
27036- if (copy_to_user(buf, kbuf, sz))
27037+#ifdef CONFIG_PAX_USERCOPY
27038+ temp = kmalloc(sz, GFP_KERNEL);
27039+ if (!temp)
27040+ return -ENOMEM;
27041+ memcpy(temp, kbuf, sz);
27042+#else
27043+ temp = kbuf;
27044+#endif
27045+
27046+ err = copy_to_user(buf, temp, sz);
27047+
27048+#ifdef CONFIG_PAX_USERCOPY
27049+ kfree(temp);
27050+#endif
27051+
27052+ if (err)
27053 return -EFAULT;
27054 buf += sz;
27055 p += sz;
27056@@ -867,6 +914,9 @@ static const struct memdev {
27057 #ifdef CONFIG_CRASH_DUMP
27058 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27059 #endif
27060+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27061+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27062+#endif
27063 };
27064
27065 static int memory_open(struct inode *inode, struct file *filp)
27066diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27067index da3cfee..a5a6606 100644
27068--- a/drivers/char/nvram.c
27069+++ b/drivers/char/nvram.c
27070@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27071
27072 spin_unlock_irq(&rtc_lock);
27073
27074- if (copy_to_user(buf, contents, tmp - contents))
27075+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27076 return -EFAULT;
27077
27078 *ppos = i;
27079diff --git a/drivers/char/random.c b/drivers/char/random.c
27080index 6035ab8..bdfe4fd 100644
27081--- a/drivers/char/random.c
27082+++ b/drivers/char/random.c
27083@@ -261,8 +261,13 @@
27084 /*
27085 * Configuration information
27086 */
27087+#ifdef CONFIG_GRKERNSEC_RANDNET
27088+#define INPUT_POOL_WORDS 512
27089+#define OUTPUT_POOL_WORDS 128
27090+#else
27091 #define INPUT_POOL_WORDS 128
27092 #define OUTPUT_POOL_WORDS 32
27093+#endif
27094 #define SEC_XFER_SIZE 512
27095 #define EXTRACT_SIZE 10
27096
27097@@ -300,10 +305,17 @@ static struct poolinfo {
27098 int poolwords;
27099 int tap1, tap2, tap3, tap4, tap5;
27100 } poolinfo_table[] = {
27101+#ifdef CONFIG_GRKERNSEC_RANDNET
27102+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27103+ { 512, 411, 308, 208, 104, 1 },
27104+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27105+ { 128, 103, 76, 51, 25, 1 },
27106+#else
27107 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27108 { 128, 103, 76, 51, 25, 1 },
27109 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27110 { 32, 26, 20, 14, 7, 1 },
27111+#endif
27112 #if 0
27113 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27114 { 2048, 1638, 1231, 819, 411, 1 },
27115@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27116
27117 extract_buf(r, tmp);
27118 i = min_t(int, nbytes, EXTRACT_SIZE);
27119- if (copy_to_user(buf, tmp, i)) {
27120+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27121 ret = -EFAULT;
27122 break;
27123 }
27124@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27125 #include <linux/sysctl.h>
27126
27127 static int min_read_thresh = 8, min_write_thresh;
27128-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27129+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27130 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27131 static char sysctl_bootid[16];
27132
27133diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27134index 1ee8ce7..b778bef 100644
27135--- a/drivers/char/sonypi.c
27136+++ b/drivers/char/sonypi.c
27137@@ -55,6 +55,7 @@
27138 #include <asm/uaccess.h>
27139 #include <asm/io.h>
27140 #include <asm/system.h>
27141+#include <asm/local.h>
27142
27143 #include <linux/sonypi.h>
27144
27145@@ -491,7 +492,7 @@ static struct sonypi_device {
27146 spinlock_t fifo_lock;
27147 wait_queue_head_t fifo_proc_list;
27148 struct fasync_struct *fifo_async;
27149- int open_count;
27150+ local_t open_count;
27151 int model;
27152 struct input_dev *input_jog_dev;
27153 struct input_dev *input_key_dev;
27154@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27155 static int sonypi_misc_release(struct inode *inode, struct file *file)
27156 {
27157 mutex_lock(&sonypi_device.lock);
27158- sonypi_device.open_count--;
27159+ local_dec(&sonypi_device.open_count);
27160 mutex_unlock(&sonypi_device.lock);
27161 return 0;
27162 }
27163@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27164 {
27165 mutex_lock(&sonypi_device.lock);
27166 /* Flush input queue on first open */
27167- if (!sonypi_device.open_count)
27168+ if (!local_read(&sonypi_device.open_count))
27169 kfifo_reset(&sonypi_device.fifo);
27170- sonypi_device.open_count++;
27171+ local_inc(&sonypi_device.open_count);
27172 mutex_unlock(&sonypi_device.lock);
27173
27174 return 0;
27175diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27176index 361a1df..2471eee 100644
27177--- a/drivers/char/tpm/tpm.c
27178+++ b/drivers/char/tpm/tpm.c
27179@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27180 chip->vendor.req_complete_val)
27181 goto out_recv;
27182
27183- if ((status == chip->vendor.req_canceled)) {
27184+ if (status == chip->vendor.req_canceled) {
27185 dev_err(chip->dev, "Operation Canceled\n");
27186 rc = -ECANCELED;
27187 goto out;
27188diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27189index 0636520..169c1d0 100644
27190--- a/drivers/char/tpm/tpm_bios.c
27191+++ b/drivers/char/tpm/tpm_bios.c
27192@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27193 event = addr;
27194
27195 if ((event->event_type == 0 && event->event_size == 0) ||
27196- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27197+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27198 return NULL;
27199
27200 return addr;
27201@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27202 return NULL;
27203
27204 if ((event->event_type == 0 && event->event_size == 0) ||
27205- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27206+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27207 return NULL;
27208
27209 (*pos)++;
27210@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27211 int i;
27212
27213 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27214- seq_putc(m, data[i]);
27215+ if (!seq_putc(m, data[i]))
27216+ return -EFAULT;
27217
27218 return 0;
27219 }
27220@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27221 log->bios_event_log_end = log->bios_event_log + len;
27222
27223 virt = acpi_os_map_memory(start, len);
27224+ if (!virt) {
27225+ kfree(log->bios_event_log);
27226+ log->bios_event_log = NULL;
27227+ return -EFAULT;
27228+ }
27229
27230- memcpy(log->bios_event_log, virt, len);
27231+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27232
27233 acpi_os_unmap_memory(virt, len);
27234 return 0;
27235diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27236index 8e3c46d..c139b99 100644
27237--- a/drivers/char/virtio_console.c
27238+++ b/drivers/char/virtio_console.c
27239@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27240 if (to_user) {
27241 ssize_t ret;
27242
27243- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27244+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27245 if (ret)
27246 return -EFAULT;
27247 } else {
27248@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27249 if (!port_has_data(port) && !port->host_connected)
27250 return 0;
27251
27252- return fill_readbuf(port, ubuf, count, true);
27253+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27254 }
27255
27256 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27257diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27258index eb1d864..39ee5a7 100644
27259--- a/drivers/dma/dmatest.c
27260+++ b/drivers/dma/dmatest.c
27261@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27262 }
27263 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27264 cnt = dmatest_add_threads(dtc, DMA_PQ);
27265- thread_count += cnt > 0 ?: 0;
27266+ thread_count += cnt > 0 ? cnt : 0;
27267 }
27268
27269 pr_info("dmatest: Started %u threads using %s\n",
27270diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27271index c9eee6d..f9d5280 100644
27272--- a/drivers/edac/amd64_edac.c
27273+++ b/drivers/edac/amd64_edac.c
27274@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27275 * PCI core identifies what devices are on a system during boot, and then
27276 * inquiry this table to see if this driver is for a given device found.
27277 */
27278-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27279+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27280 {
27281 .vendor = PCI_VENDOR_ID_AMD,
27282 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27283diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27284index e47e73b..348e0bd 100644
27285--- a/drivers/edac/amd76x_edac.c
27286+++ b/drivers/edac/amd76x_edac.c
27287@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27288 edac_mc_free(mci);
27289 }
27290
27291-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27292+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27293 {
27294 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27295 AMD762},
27296diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27297index 1af531a..3a8ff27 100644
27298--- a/drivers/edac/e752x_edac.c
27299+++ b/drivers/edac/e752x_edac.c
27300@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27301 edac_mc_free(mci);
27302 }
27303
27304-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27305+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27306 {
27307 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27308 E7520},
27309diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27310index 6ffb6d2..383d8d7 100644
27311--- a/drivers/edac/e7xxx_edac.c
27312+++ b/drivers/edac/e7xxx_edac.c
27313@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27314 edac_mc_free(mci);
27315 }
27316
27317-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27318+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27319 {
27320 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27321 E7205},
27322diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27323index 495198a..ac08c85 100644
27324--- a/drivers/edac/edac_pci_sysfs.c
27325+++ b/drivers/edac/edac_pci_sysfs.c
27326@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27327 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27328 static int edac_pci_poll_msec = 1000; /* one second workq period */
27329
27330-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27331-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27332+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27333+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27334
27335 static struct kobject *edac_pci_top_main_kobj;
27336 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27337@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27338 edac_printk(KERN_CRIT, EDAC_PCI,
27339 "Signaled System Error on %s\n",
27340 pci_name(dev));
27341- atomic_inc(&pci_nonparity_count);
27342+ atomic_inc_unchecked(&pci_nonparity_count);
27343 }
27344
27345 if (status & (PCI_STATUS_PARITY)) {
27346@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27347 "Master Data Parity Error on %s\n",
27348 pci_name(dev));
27349
27350- atomic_inc(&pci_parity_count);
27351+ atomic_inc_unchecked(&pci_parity_count);
27352 }
27353
27354 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27355@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27356 "Detected Parity Error on %s\n",
27357 pci_name(dev));
27358
27359- atomic_inc(&pci_parity_count);
27360+ atomic_inc_unchecked(&pci_parity_count);
27361 }
27362 }
27363
27364@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27365 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27366 "Signaled System Error on %s\n",
27367 pci_name(dev));
27368- atomic_inc(&pci_nonparity_count);
27369+ atomic_inc_unchecked(&pci_nonparity_count);
27370 }
27371
27372 if (status & (PCI_STATUS_PARITY)) {
27373@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27374 "Master Data Parity Error on "
27375 "%s\n", pci_name(dev));
27376
27377- atomic_inc(&pci_parity_count);
27378+ atomic_inc_unchecked(&pci_parity_count);
27379 }
27380
27381 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27382@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27383 "Detected Parity Error on %s\n",
27384 pci_name(dev));
27385
27386- atomic_inc(&pci_parity_count);
27387+ atomic_inc_unchecked(&pci_parity_count);
27388 }
27389 }
27390 }
27391@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27392 if (!check_pci_errors)
27393 return;
27394
27395- before_count = atomic_read(&pci_parity_count);
27396+ before_count = atomic_read_unchecked(&pci_parity_count);
27397
27398 /* scan all PCI devices looking for a Parity Error on devices and
27399 * bridges.
27400@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27401 /* Only if operator has selected panic on PCI Error */
27402 if (edac_pci_get_panic_on_pe()) {
27403 /* If the count is different 'after' from 'before' */
27404- if (before_count != atomic_read(&pci_parity_count))
27405+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27406 panic("EDAC: PCI Parity Error");
27407 }
27408 }
27409diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27410index c0510b3..6e2a954 100644
27411--- a/drivers/edac/i3000_edac.c
27412+++ b/drivers/edac/i3000_edac.c
27413@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27414 edac_mc_free(mci);
27415 }
27416
27417-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27418+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27419 {
27420 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27421 I3000},
27422diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27423index aa08497..7e6822a 100644
27424--- a/drivers/edac/i3200_edac.c
27425+++ b/drivers/edac/i3200_edac.c
27426@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27427 edac_mc_free(mci);
27428 }
27429
27430-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27431+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27432 {
27433 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27434 I3200},
27435diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27436index 4dc3ac2..67d05a6 100644
27437--- a/drivers/edac/i5000_edac.c
27438+++ b/drivers/edac/i5000_edac.c
27439@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27440 *
27441 * The "E500P" device is the first device supported.
27442 */
27443-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27444+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27445 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27446 .driver_data = I5000P},
27447
27448diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27449index bcbdeec..9886d16 100644
27450--- a/drivers/edac/i5100_edac.c
27451+++ b/drivers/edac/i5100_edac.c
27452@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27453 edac_mc_free(mci);
27454 }
27455
27456-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27457+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27458 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27459 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27460 { 0, }
27461diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27462index 74d6ec34..baff517 100644
27463--- a/drivers/edac/i5400_edac.c
27464+++ b/drivers/edac/i5400_edac.c
27465@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27466 *
27467 * The "E500P" device is the first device supported.
27468 */
27469-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27470+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27471 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27472 {0,} /* 0 terminated list. */
27473 };
27474diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27475index 6104dba..e7ea8e1 100644
27476--- a/drivers/edac/i7300_edac.c
27477+++ b/drivers/edac/i7300_edac.c
27478@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27479 *
27480 * Has only 8086:360c PCI ID
27481 */
27482-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27483+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27484 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27485 {0,} /* 0 terminated list. */
27486 };
27487diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27488index 70ad892..178943c 100644
27489--- a/drivers/edac/i7core_edac.c
27490+++ b/drivers/edac/i7core_edac.c
27491@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27492 /*
27493 * pci_device_id table for which devices we are looking for
27494 */
27495-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27496+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27497 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27498 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27499 {0,} /* 0 terminated list. */
27500diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27501index 4329d39..f3022ef 100644
27502--- a/drivers/edac/i82443bxgx_edac.c
27503+++ b/drivers/edac/i82443bxgx_edac.c
27504@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27505
27506 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27507
27508-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27509+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27510 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27511 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27512 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27513diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27514index 931a057..fd28340 100644
27515--- a/drivers/edac/i82860_edac.c
27516+++ b/drivers/edac/i82860_edac.c
27517@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27518 edac_mc_free(mci);
27519 }
27520
27521-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27522+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27523 {
27524 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27525 I82860},
27526diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27527index 33864c6..01edc61 100644
27528--- a/drivers/edac/i82875p_edac.c
27529+++ b/drivers/edac/i82875p_edac.c
27530@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27531 edac_mc_free(mci);
27532 }
27533
27534-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27535+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27536 {
27537 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27538 I82875P},
27539diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27540index a5da732..983363b 100644
27541--- a/drivers/edac/i82975x_edac.c
27542+++ b/drivers/edac/i82975x_edac.c
27543@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27544 edac_mc_free(mci);
27545 }
27546
27547-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27548+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27549 {
27550 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27551 I82975X
27552diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27553index 0106747..0b40417 100644
27554--- a/drivers/edac/mce_amd.h
27555+++ b/drivers/edac/mce_amd.h
27556@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27557 bool (*dc_mce)(u16, u8);
27558 bool (*ic_mce)(u16, u8);
27559 bool (*nb_mce)(u16, u8);
27560-};
27561+} __no_const;
27562
27563 void amd_report_gart_errors(bool);
27564 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27565diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27566index b153674..ad2ba9b 100644
27567--- a/drivers/edac/r82600_edac.c
27568+++ b/drivers/edac/r82600_edac.c
27569@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27570 edac_mc_free(mci);
27571 }
27572
27573-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27574+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27575 {
27576 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27577 },
27578diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27579index 7a402bf..af0b211 100644
27580--- a/drivers/edac/sb_edac.c
27581+++ b/drivers/edac/sb_edac.c
27582@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27583 /*
27584 * pci_device_id table for which devices we are looking for
27585 */
27586-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27587+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27588 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27589 {0,} /* 0 terminated list. */
27590 };
27591diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27592index b6f47de..c5acf3a 100644
27593--- a/drivers/edac/x38_edac.c
27594+++ b/drivers/edac/x38_edac.c
27595@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27596 edac_mc_free(mci);
27597 }
27598
27599-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27600+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27601 {
27602 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27603 X38},
27604diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27605index 85661b0..c784559a 100644
27606--- a/drivers/firewire/core-card.c
27607+++ b/drivers/firewire/core-card.c
27608@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27609
27610 void fw_core_remove_card(struct fw_card *card)
27611 {
27612- struct fw_card_driver dummy_driver = dummy_driver_template;
27613+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27614
27615 card->driver->update_phy_reg(card, 4,
27616 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27617diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27618index 4799393..37bd3ab 100644
27619--- a/drivers/firewire/core-cdev.c
27620+++ b/drivers/firewire/core-cdev.c
27621@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27622 int ret;
27623
27624 if ((request->channels == 0 && request->bandwidth == 0) ||
27625- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27626- request->bandwidth < 0)
27627+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27628 return -EINVAL;
27629
27630 r = kmalloc(sizeof(*r), GFP_KERNEL);
27631diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27632index 855ab3f..11f4bbd 100644
27633--- a/drivers/firewire/core-transaction.c
27634+++ b/drivers/firewire/core-transaction.c
27635@@ -37,6 +37,7 @@
27636 #include <linux/timer.h>
27637 #include <linux/types.h>
27638 #include <linux/workqueue.h>
27639+#include <linux/sched.h>
27640
27641 #include <asm/byteorder.h>
27642
27643diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27644index b45be57..5fad18b 100644
27645--- a/drivers/firewire/core.h
27646+++ b/drivers/firewire/core.h
27647@@ -101,6 +101,7 @@ struct fw_card_driver {
27648
27649 int (*stop_iso)(struct fw_iso_context *ctx);
27650 };
27651+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27652
27653 void fw_card_initialize(struct fw_card *card,
27654 const struct fw_card_driver *driver, struct device *device);
27655diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27656index 153980b..4b4d046 100644
27657--- a/drivers/firmware/dmi_scan.c
27658+++ b/drivers/firmware/dmi_scan.c
27659@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27660 }
27661 }
27662 else {
27663- /*
27664- * no iounmap() for that ioremap(); it would be a no-op, but
27665- * it's so early in setup that sucker gets confused into doing
27666- * what it shouldn't if we actually call it.
27667- */
27668 p = dmi_ioremap(0xF0000, 0x10000);
27669 if (p == NULL)
27670 goto error;
27671@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27672 if (buf == NULL)
27673 return -1;
27674
27675- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27676+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27677
27678 iounmap(buf);
27679 return 0;
27680diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27681index 98723cb..10ca85b 100644
27682--- a/drivers/gpio/gpio-vr41xx.c
27683+++ b/drivers/gpio/gpio-vr41xx.c
27684@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27685 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27686 maskl, pendl, maskh, pendh);
27687
27688- atomic_inc(&irq_err_count);
27689+ atomic_inc_unchecked(&irq_err_count);
27690
27691 return -EINVAL;
27692 }
27693diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27694index 8323fc3..5c1d755 100644
27695--- a/drivers/gpu/drm/drm_crtc.c
27696+++ b/drivers/gpu/drm/drm_crtc.c
27697@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27698 */
27699 if ((out_resp->count_modes >= mode_count) && mode_count) {
27700 copied = 0;
27701- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27702+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27703 list_for_each_entry(mode, &connector->modes, head) {
27704 drm_crtc_convert_to_umode(&u_mode, mode);
27705 if (copy_to_user(mode_ptr + copied,
27706@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27707
27708 if ((out_resp->count_props >= props_count) && props_count) {
27709 copied = 0;
27710- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27711- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27712+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27713+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27714 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27715 if (connector->property_ids[i] != 0) {
27716 if (put_user(connector->property_ids[i],
27717@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27718
27719 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27720 copied = 0;
27721- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27722+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27723 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27724 if (connector->encoder_ids[i] != 0) {
27725 if (put_user(connector->encoder_ids[i],
27726@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27727 }
27728
27729 for (i = 0; i < crtc_req->count_connectors; i++) {
27730- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27731+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27732 if (get_user(out_id, &set_connectors_ptr[i])) {
27733 ret = -EFAULT;
27734 goto out;
27735@@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27736 fb = obj_to_fb(obj);
27737
27738 num_clips = r->num_clips;
27739- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27740+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27741
27742 if (!num_clips != !clips_ptr) {
27743 ret = -EINVAL;
27744@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27745 out_resp->flags = property->flags;
27746
27747 if ((out_resp->count_values >= value_count) && value_count) {
27748- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27749+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27750 for (i = 0; i < value_count; i++) {
27751 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27752 ret = -EFAULT;
27753@@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27754 if (property->flags & DRM_MODE_PROP_ENUM) {
27755 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27756 copied = 0;
27757- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27758+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27759 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27760
27761 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27762@@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27763 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27764 copied = 0;
27765 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27766- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27767+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27768
27769 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27770 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27771@@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27772 struct drm_mode_get_blob *out_resp = data;
27773 struct drm_property_blob *blob;
27774 int ret = 0;
27775- void *blob_ptr;
27776+ void __user *blob_ptr;
27777
27778 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27779 return -EINVAL;
27780@@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27781 blob = obj_to_blob(obj);
27782
27783 if (out_resp->length == blob->length) {
27784- blob_ptr = (void *)(unsigned long)out_resp->data;
27785+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
27786 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27787 ret = -EFAULT;
27788 goto done;
27789diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27790index d2619d7..bd6bd00 100644
27791--- a/drivers/gpu/drm/drm_crtc_helper.c
27792+++ b/drivers/gpu/drm/drm_crtc_helper.c
27793@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27794 struct drm_crtc *tmp;
27795 int crtc_mask = 1;
27796
27797- WARN(!crtc, "checking null crtc?\n");
27798+ BUG_ON(!crtc);
27799
27800 dev = crtc->dev;
27801
27802diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27803index 40c187c..5746164 100644
27804--- a/drivers/gpu/drm/drm_drv.c
27805+++ b/drivers/gpu/drm/drm_drv.c
27806@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27807 /**
27808 * Copy and IOCTL return string to user space
27809 */
27810-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27811+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27812 {
27813 int len;
27814
27815@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27816
27817 dev = file_priv->minor->dev;
27818 atomic_inc(&dev->ioctl_count);
27819- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27820+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27821 ++file_priv->ioctl_count;
27822
27823 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27824diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27825index 4911e1d..484c8a3 100644
27826--- a/drivers/gpu/drm/drm_fops.c
27827+++ b/drivers/gpu/drm/drm_fops.c
27828@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27829 }
27830
27831 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27832- atomic_set(&dev->counts[i], 0);
27833+ atomic_set_unchecked(&dev->counts[i], 0);
27834
27835 dev->sigdata.lock = NULL;
27836
27837@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27838
27839 retcode = drm_open_helper(inode, filp, dev);
27840 if (!retcode) {
27841- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27842- if (!dev->open_count++)
27843+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27844+ if (local_inc_return(&dev->open_count) == 1)
27845 retcode = drm_setup(dev);
27846 }
27847 if (!retcode) {
27848@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27849
27850 mutex_lock(&drm_global_mutex);
27851
27852- DRM_DEBUG("open_count = %d\n", dev->open_count);
27853+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27854
27855 if (dev->driver->preclose)
27856 dev->driver->preclose(dev, file_priv);
27857@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27858 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27859 task_pid_nr(current),
27860 (long)old_encode_dev(file_priv->minor->device),
27861- dev->open_count);
27862+ local_read(&dev->open_count));
27863
27864 /* if the master has gone away we can't do anything with the lock */
27865 if (file_priv->minor->master)
27866@@ -566,8 +566,8 @@ int drm_release(struct inode *inode, struct file *filp)
27867 * End inline drm_release
27868 */
27869
27870- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27871- if (!--dev->open_count) {
27872+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27873+ if (local_dec_and_test(&dev->open_count)) {
27874 if (atomic_read(&dev->ioctl_count)) {
27875 DRM_ERROR("Device busy: %d\n",
27876 atomic_read(&dev->ioctl_count));
27877diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27878index c87dc96..326055d 100644
27879--- a/drivers/gpu/drm/drm_global.c
27880+++ b/drivers/gpu/drm/drm_global.c
27881@@ -36,7 +36,7 @@
27882 struct drm_global_item {
27883 struct mutex mutex;
27884 void *object;
27885- int refcount;
27886+ atomic_t refcount;
27887 };
27888
27889 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27890@@ -49,7 +49,7 @@ void drm_global_init(void)
27891 struct drm_global_item *item = &glob[i];
27892 mutex_init(&item->mutex);
27893 item->object = NULL;
27894- item->refcount = 0;
27895+ atomic_set(&item->refcount, 0);
27896 }
27897 }
27898
27899@@ -59,7 +59,7 @@ void drm_global_release(void)
27900 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27901 struct drm_global_item *item = &glob[i];
27902 BUG_ON(item->object != NULL);
27903- BUG_ON(item->refcount != 0);
27904+ BUG_ON(atomic_read(&item->refcount) != 0);
27905 }
27906 }
27907
27908@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27909 void *object;
27910
27911 mutex_lock(&item->mutex);
27912- if (item->refcount == 0) {
27913+ if (atomic_read(&item->refcount) == 0) {
27914 item->object = kzalloc(ref->size, GFP_KERNEL);
27915 if (unlikely(item->object == NULL)) {
27916 ret = -ENOMEM;
27917@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27918 goto out_err;
27919
27920 }
27921- ++item->refcount;
27922+ atomic_inc(&item->refcount);
27923 ref->object = item->object;
27924 object = item->object;
27925 mutex_unlock(&item->mutex);
27926@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
27927 struct drm_global_item *item = &glob[ref->global_type];
27928
27929 mutex_lock(&item->mutex);
27930- BUG_ON(item->refcount == 0);
27931+ BUG_ON(atomic_read(&item->refcount) == 0);
27932 BUG_ON(ref->object != item->object);
27933- if (--item->refcount == 0) {
27934+ if (atomic_dec_and_test(&item->refcount)) {
27935 ref->release(ref);
27936 item->object = NULL;
27937 }
27938diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
27939index ab1162d..42587b2 100644
27940--- a/drivers/gpu/drm/drm_info.c
27941+++ b/drivers/gpu/drm/drm_info.c
27942@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
27943 struct drm_local_map *map;
27944 struct drm_map_list *r_list;
27945
27946- /* Hardcoded from _DRM_FRAME_BUFFER,
27947- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27948- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27949- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27950+ static const char * const types[] = {
27951+ [_DRM_FRAME_BUFFER] = "FB",
27952+ [_DRM_REGISTERS] = "REG",
27953+ [_DRM_SHM] = "SHM",
27954+ [_DRM_AGP] = "AGP",
27955+ [_DRM_SCATTER_GATHER] = "SG",
27956+ [_DRM_CONSISTENT] = "PCI",
27957+ [_DRM_GEM] = "GEM" };
27958 const char *type;
27959 int i;
27960
27961@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
27962 map = r_list->map;
27963 if (!map)
27964 continue;
27965- if (map->type < 0 || map->type > 5)
27966+ if (map->type >= ARRAY_SIZE(types))
27967 type = "??";
27968 else
27969 type = types[map->type];
27970@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
27971 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27972 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27973 vma->vm_flags & VM_IO ? 'i' : '-',
27974+#ifdef CONFIG_GRKERNSEC_HIDESYM
27975+ 0);
27976+#else
27977 vma->vm_pgoff);
27978+#endif
27979
27980 #if defined(__i386__)
27981 pgprot = pgprot_val(vma->vm_page_prot);
27982diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
27983index ddd70db..40321e6 100644
27984--- a/drivers/gpu/drm/drm_ioc32.c
27985+++ b/drivers/gpu/drm/drm_ioc32.c
27986@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
27987 request = compat_alloc_user_space(nbytes);
27988 if (!access_ok(VERIFY_WRITE, request, nbytes))
27989 return -EFAULT;
27990- list = (struct drm_buf_desc *) (request + 1);
27991+ list = (struct drm_buf_desc __user *) (request + 1);
27992
27993 if (__put_user(count, &request->count)
27994 || __put_user(list, &request->list))
27995@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
27996 request = compat_alloc_user_space(nbytes);
27997 if (!access_ok(VERIFY_WRITE, request, nbytes))
27998 return -EFAULT;
27999- list = (struct drm_buf_pub *) (request + 1);
28000+ list = (struct drm_buf_pub __user *) (request + 1);
28001
28002 if (__put_user(count, &request->count)
28003 || __put_user(list, &request->list))
28004diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28005index 904d7e9..ab88581 100644
28006--- a/drivers/gpu/drm/drm_ioctl.c
28007+++ b/drivers/gpu/drm/drm_ioctl.c
28008@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28009 stats->data[i].value =
28010 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28011 else
28012- stats->data[i].value = atomic_read(&dev->counts[i]);
28013+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28014 stats->data[i].type = dev->types[i];
28015 }
28016
28017diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28018index 632ae24..244cf4a 100644
28019--- a/drivers/gpu/drm/drm_lock.c
28020+++ b/drivers/gpu/drm/drm_lock.c
28021@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28022 if (drm_lock_take(&master->lock, lock->context)) {
28023 master->lock.file_priv = file_priv;
28024 master->lock.lock_time = jiffies;
28025- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28026+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28027 break; /* Got lock */
28028 }
28029
28030@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28031 return -EINVAL;
28032 }
28033
28034- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28035+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28036
28037 if (drm_lock_free(&master->lock, lock->context)) {
28038 /* FIXME: Should really bail out here. */
28039diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28040index 8f371e8..9f85d52 100644
28041--- a/drivers/gpu/drm/i810/i810_dma.c
28042+++ b/drivers/gpu/drm/i810/i810_dma.c
28043@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28044 dma->buflist[vertex->idx],
28045 vertex->discard, vertex->used);
28046
28047- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28048- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28049+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28050+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28051 sarea_priv->last_enqueue = dev_priv->counter - 1;
28052 sarea_priv->last_dispatch = (int)hw_status[5];
28053
28054@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28055 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28056 mc->last_render);
28057
28058- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28059- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28060+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28061+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28062 sarea_priv->last_enqueue = dev_priv->counter - 1;
28063 sarea_priv->last_dispatch = (int)hw_status[5];
28064
28065diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28066index c9339f4..f5e1b9d 100644
28067--- a/drivers/gpu/drm/i810/i810_drv.h
28068+++ b/drivers/gpu/drm/i810/i810_drv.h
28069@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28070 int page_flipping;
28071
28072 wait_queue_head_t irq_queue;
28073- atomic_t irq_received;
28074- atomic_t irq_emitted;
28075+ atomic_unchecked_t irq_received;
28076+ atomic_unchecked_t irq_emitted;
28077
28078 int front_offset;
28079 } drm_i810_private_t;
28080diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28081index 004b048..7588eba 100644
28082--- a/drivers/gpu/drm/i915/i915_debugfs.c
28083+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28084@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28085 I915_READ(GTIMR));
28086 }
28087 seq_printf(m, "Interrupts received: %d\n",
28088- atomic_read(&dev_priv->irq_received));
28089+ atomic_read_unchecked(&dev_priv->irq_received));
28090 for (i = 0; i < I915_NUM_RINGS; i++) {
28091 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28092 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28093@@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28094 return ret;
28095
28096 if (opregion->header)
28097- seq_write(m, opregion->header, OPREGION_SIZE);
28098+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28099
28100 mutex_unlock(&dev->struct_mutex);
28101
28102diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28103index a9ae374..43c1e9e 100644
28104--- a/drivers/gpu/drm/i915/i915_dma.c
28105+++ b/drivers/gpu/drm/i915/i915_dma.c
28106@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28107 bool can_switch;
28108
28109 spin_lock(&dev->count_lock);
28110- can_switch = (dev->open_count == 0);
28111+ can_switch = (local_read(&dev->open_count) == 0);
28112 spin_unlock(&dev->count_lock);
28113 return can_switch;
28114 }
28115diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28116index 554bef7..d24791c 100644
28117--- a/drivers/gpu/drm/i915/i915_drv.h
28118+++ b/drivers/gpu/drm/i915/i915_drv.h
28119@@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28120 /* render clock increase/decrease */
28121 /* display clock increase/decrease */
28122 /* pll clock increase/decrease */
28123-};
28124+} __no_const;
28125
28126 struct intel_device_info {
28127 u8 gen;
28128@@ -312,7 +312,7 @@ typedef struct drm_i915_private {
28129 int current_page;
28130 int page_flipping;
28131
28132- atomic_t irq_received;
28133+ atomic_unchecked_t irq_received;
28134
28135 /* protects the irq masks */
28136 spinlock_t irq_lock;
28137@@ -887,7 +887,7 @@ struct drm_i915_gem_object {
28138 * will be page flipped away on the next vblank. When it
28139 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28140 */
28141- atomic_t pending_flip;
28142+ atomic_unchecked_t pending_flip;
28143 };
28144
28145 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28146@@ -1267,7 +1267,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28147 extern void intel_teardown_gmbus(struct drm_device *dev);
28148 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28149 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28150-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28151+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28152 {
28153 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28154 }
28155diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28156index b9da890..cad1d98 100644
28157--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28158+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28159@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28160 i915_gem_clflush_object(obj);
28161
28162 if (obj->base.pending_write_domain)
28163- cd->flips |= atomic_read(&obj->pending_flip);
28164+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28165
28166 /* The actual obj->write_domain will be updated with
28167 * pending_write_domain after we emit the accumulated flush for all
28168@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28169
28170 static int
28171 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28172- int count)
28173+ unsigned int count)
28174 {
28175- int i;
28176+ unsigned int i;
28177
28178 for (i = 0; i < count; i++) {
28179 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28180diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28181index b40004b..7c53a75 100644
28182--- a/drivers/gpu/drm/i915/i915_irq.c
28183+++ b/drivers/gpu/drm/i915/i915_irq.c
28184@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28185 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28186 struct drm_i915_master_private *master_priv;
28187
28188- atomic_inc(&dev_priv->irq_received);
28189+ atomic_inc_unchecked(&dev_priv->irq_received);
28190
28191 /* disable master interrupt before clearing iir */
28192 de_ier = I915_READ(DEIER);
28193@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28194 struct drm_i915_master_private *master_priv;
28195 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28196
28197- atomic_inc(&dev_priv->irq_received);
28198+ atomic_inc_unchecked(&dev_priv->irq_received);
28199
28200 if (IS_GEN6(dev))
28201 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28202@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28203 int ret = IRQ_NONE, pipe;
28204 bool blc_event = false;
28205
28206- atomic_inc(&dev_priv->irq_received);
28207+ atomic_inc_unchecked(&dev_priv->irq_received);
28208
28209 iir = I915_READ(IIR);
28210
28211@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28212 {
28213 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28214
28215- atomic_set(&dev_priv->irq_received, 0);
28216+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28217
28218 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28219 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28220@@ -1931,7 +1931,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28221 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28222 int pipe;
28223
28224- atomic_set(&dev_priv->irq_received, 0);
28225+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28226
28227 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28228 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28229diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28230index daa5743..c0757a9 100644
28231--- a/drivers/gpu/drm/i915/intel_display.c
28232+++ b/drivers/gpu/drm/i915/intel_display.c
28233@@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28234
28235 wait_event(dev_priv->pending_flip_queue,
28236 atomic_read(&dev_priv->mm.wedged) ||
28237- atomic_read(&obj->pending_flip) == 0);
28238+ atomic_read_unchecked(&obj->pending_flip) == 0);
28239
28240 /* Big Hammer, we also need to ensure that any pending
28241 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28242@@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28243 obj = to_intel_framebuffer(crtc->fb)->obj;
28244 dev_priv = crtc->dev->dev_private;
28245 wait_event(dev_priv->pending_flip_queue,
28246- atomic_read(&obj->pending_flip) == 0);
28247+ atomic_read_unchecked(&obj->pending_flip) == 0);
28248 }
28249
28250 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28251@@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28252
28253 atomic_clear_mask(1 << intel_crtc->plane,
28254 &obj->pending_flip.counter);
28255- if (atomic_read(&obj->pending_flip) == 0)
28256+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28257 wake_up(&dev_priv->pending_flip_queue);
28258
28259 schedule_work(&work->work);
28260@@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28261 /* Block clients from rendering to the new back buffer until
28262 * the flip occurs and the object is no longer visible.
28263 */
28264- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28265+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28266
28267 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28268 if (ret)
28269@@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28270 return 0;
28271
28272 cleanup_pending:
28273- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28274+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28275 drm_gem_object_unreference(&work->old_fb_obj->base);
28276 drm_gem_object_unreference(&obj->base);
28277 mutex_unlock(&dev->struct_mutex);
28278diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28279index 54558a0..2d97005 100644
28280--- a/drivers/gpu/drm/mga/mga_drv.h
28281+++ b/drivers/gpu/drm/mga/mga_drv.h
28282@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28283 u32 clear_cmd;
28284 u32 maccess;
28285
28286- atomic_t vbl_received; /**< Number of vblanks received. */
28287+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28288 wait_queue_head_t fence_queue;
28289- atomic_t last_fence_retired;
28290+ atomic_unchecked_t last_fence_retired;
28291 u32 next_fence_to_post;
28292
28293 unsigned int fb_cpp;
28294diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28295index 2581202..f230a8d9 100644
28296--- a/drivers/gpu/drm/mga/mga_irq.c
28297+++ b/drivers/gpu/drm/mga/mga_irq.c
28298@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28299 if (crtc != 0)
28300 return 0;
28301
28302- return atomic_read(&dev_priv->vbl_received);
28303+ return atomic_read_unchecked(&dev_priv->vbl_received);
28304 }
28305
28306
28307@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28308 /* VBLANK interrupt */
28309 if (status & MGA_VLINEPEN) {
28310 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28311- atomic_inc(&dev_priv->vbl_received);
28312+ atomic_inc_unchecked(&dev_priv->vbl_received);
28313 drm_handle_vblank(dev, 0);
28314 handled = 1;
28315 }
28316@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28317 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28318 MGA_WRITE(MGA_PRIMEND, prim_end);
28319
28320- atomic_inc(&dev_priv->last_fence_retired);
28321+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28322 DRM_WAKEUP(&dev_priv->fence_queue);
28323 handled = 1;
28324 }
28325@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28326 * using fences.
28327 */
28328 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28329- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28330+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28331 - *sequence) <= (1 << 23)));
28332
28333 *sequence = cur_fence;
28334diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28335index 5fc201b..7b032b9 100644
28336--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28337+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28338@@ -201,7 +201,7 @@ struct methods {
28339 const char desc[8];
28340 void (*loadbios)(struct drm_device *, uint8_t *);
28341 const bool rw;
28342-};
28343+} __do_const;
28344
28345 static struct methods shadow_methods[] = {
28346 { "PRAMIN", load_vbios_pramin, true },
28347@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28348 struct bit_table {
28349 const char id;
28350 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28351-};
28352+} __no_const;
28353
28354 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28355
28356diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28357index 4c0be3a..5757582 100644
28358--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28359+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28360@@ -238,7 +238,7 @@ struct nouveau_channel {
28361 struct list_head pending;
28362 uint32_t sequence;
28363 uint32_t sequence_ack;
28364- atomic_t last_sequence_irq;
28365+ atomic_unchecked_t last_sequence_irq;
28366 struct nouveau_vma vma;
28367 } fence;
28368
28369@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28370 u32 handle, u16 class);
28371 void (*set_tile_region)(struct drm_device *dev, int i);
28372 void (*tlb_flush)(struct drm_device *, int engine);
28373-};
28374+} __no_const;
28375
28376 struct nouveau_instmem_engine {
28377 void *priv;
28378@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28379 struct nouveau_mc_engine {
28380 int (*init)(struct drm_device *dev);
28381 void (*takedown)(struct drm_device *dev);
28382-};
28383+} __no_const;
28384
28385 struct nouveau_timer_engine {
28386 int (*init)(struct drm_device *dev);
28387 void (*takedown)(struct drm_device *dev);
28388 uint64_t (*read)(struct drm_device *dev);
28389-};
28390+} __no_const;
28391
28392 struct nouveau_fb_engine {
28393 int num_tiles;
28394@@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28395 void (*put)(struct drm_device *, struct nouveau_mem **);
28396
28397 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28398-};
28399+} __no_const;
28400
28401 struct nouveau_engine {
28402 struct nouveau_instmem_engine instmem;
28403@@ -706,7 +706,7 @@ struct drm_nouveau_private {
28404 struct drm_global_reference mem_global_ref;
28405 struct ttm_bo_global_ref bo_global_ref;
28406 struct ttm_bo_device bdev;
28407- atomic_t validate_sequence;
28408+ atomic_unchecked_t validate_sequence;
28409 } ttm;
28410
28411 struct {
28412diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28413index 2f6daae..c9d7b9e 100644
28414--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28415+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28416@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28417 if (USE_REFCNT(dev))
28418 sequence = nvchan_rd32(chan, 0x48);
28419 else
28420- sequence = atomic_read(&chan->fence.last_sequence_irq);
28421+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28422
28423 if (chan->fence.sequence_ack == sequence)
28424 goto out;
28425@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28426 return ret;
28427 }
28428
28429- atomic_set(&chan->fence.last_sequence_irq, 0);
28430+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28431 return 0;
28432 }
28433
28434diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28435index 5f0bc57..eb9fac8 100644
28436--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28437+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28438@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28439 int trycnt = 0;
28440 int ret, i;
28441
28442- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28443+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28444 retry:
28445 if (++trycnt > 100000) {
28446 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28447diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28448index d8831ab..0ba8356 100644
28449--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28450+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28451@@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28452 bool can_switch;
28453
28454 spin_lock(&dev->count_lock);
28455- can_switch = (dev->open_count == 0);
28456+ can_switch = (local_read(&dev->open_count) == 0);
28457 spin_unlock(&dev->count_lock);
28458 return can_switch;
28459 }
28460diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28461index dbdea8e..cd6eeeb 100644
28462--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28463+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28464@@ -554,7 +554,7 @@ static int
28465 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28466 u32 class, u32 mthd, u32 data)
28467 {
28468- atomic_set(&chan->fence.last_sequence_irq, data);
28469+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28470 return 0;
28471 }
28472
28473diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28474index bcac90b..53bfc76 100644
28475--- a/drivers/gpu/drm/r128/r128_cce.c
28476+++ b/drivers/gpu/drm/r128/r128_cce.c
28477@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28478
28479 /* GH: Simple idle check.
28480 */
28481- atomic_set(&dev_priv->idle_count, 0);
28482+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28483
28484 /* We don't support anything other than bus-mastering ring mode,
28485 * but the ring can be in either AGP or PCI space for the ring
28486diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28487index 930c71b..499aded 100644
28488--- a/drivers/gpu/drm/r128/r128_drv.h
28489+++ b/drivers/gpu/drm/r128/r128_drv.h
28490@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28491 int is_pci;
28492 unsigned long cce_buffers_offset;
28493
28494- atomic_t idle_count;
28495+ atomic_unchecked_t idle_count;
28496
28497 int page_flipping;
28498 int current_page;
28499 u32 crtc_offset;
28500 u32 crtc_offset_cntl;
28501
28502- atomic_t vbl_received;
28503+ atomic_unchecked_t vbl_received;
28504
28505 u32 color_fmt;
28506 unsigned int front_offset;
28507diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28508index 429d5a0..7e899ed 100644
28509--- a/drivers/gpu/drm/r128/r128_irq.c
28510+++ b/drivers/gpu/drm/r128/r128_irq.c
28511@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28512 if (crtc != 0)
28513 return 0;
28514
28515- return atomic_read(&dev_priv->vbl_received);
28516+ return atomic_read_unchecked(&dev_priv->vbl_received);
28517 }
28518
28519 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28520@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28521 /* VBLANK interrupt */
28522 if (status & R128_CRTC_VBLANK_INT) {
28523 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28524- atomic_inc(&dev_priv->vbl_received);
28525+ atomic_inc_unchecked(&dev_priv->vbl_received);
28526 drm_handle_vblank(dev, 0);
28527 return IRQ_HANDLED;
28528 }
28529diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28530index a9e33ce..09edd4b 100644
28531--- a/drivers/gpu/drm/r128/r128_state.c
28532+++ b/drivers/gpu/drm/r128/r128_state.c
28533@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28534
28535 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28536 {
28537- if (atomic_read(&dev_priv->idle_count) == 0)
28538+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28539 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28540 else
28541- atomic_set(&dev_priv->idle_count, 0);
28542+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28543 }
28544
28545 #endif
28546diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28547index 5a82b6b..9e69c73 100644
28548--- a/drivers/gpu/drm/radeon/mkregtable.c
28549+++ b/drivers/gpu/drm/radeon/mkregtable.c
28550@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28551 regex_t mask_rex;
28552 regmatch_t match[4];
28553 char buf[1024];
28554- size_t end;
28555+ long end;
28556 int len;
28557 int done = 0;
28558 int r;
28559 unsigned o;
28560 struct offset *offset;
28561 char last_reg_s[10];
28562- int last_reg;
28563+ unsigned long last_reg;
28564
28565 if (regcomp
28566 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28567diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28568index 8227e76..ce0b195 100644
28569--- a/drivers/gpu/drm/radeon/radeon.h
28570+++ b/drivers/gpu/drm/radeon/radeon.h
28571@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28572 */
28573 struct radeon_fence_driver {
28574 uint32_t scratch_reg;
28575- atomic_t seq;
28576+ atomic_unchecked_t seq;
28577 uint32_t last_seq;
28578 unsigned long last_jiffies;
28579 unsigned long last_timeout;
28580@@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28581 int x2, int y2);
28582 void (*draw_auto)(struct radeon_device *rdev);
28583 void (*set_default_state)(struct radeon_device *rdev);
28584-};
28585+} __no_const;
28586
28587 struct r600_blit {
28588 struct mutex mutex;
28589@@ -954,7 +954,7 @@ struct radeon_asic {
28590 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28591 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28592 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28593-};
28594+} __no_const;
28595
28596 /*
28597 * Asic structures
28598diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28599index 9b39145..389b93b 100644
28600--- a/drivers/gpu/drm/radeon/radeon_device.c
28601+++ b/drivers/gpu/drm/radeon/radeon_device.c
28602@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28603 bool can_switch;
28604
28605 spin_lock(&dev->count_lock);
28606- can_switch = (dev->open_count == 0);
28607+ can_switch = (local_read(&dev->open_count) == 0);
28608 spin_unlock(&dev->count_lock);
28609 return can_switch;
28610 }
28611diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28612index a1b59ca..86f2d44 100644
28613--- a/drivers/gpu/drm/radeon/radeon_drv.h
28614+++ b/drivers/gpu/drm/radeon/radeon_drv.h
28615@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28616
28617 /* SW interrupt */
28618 wait_queue_head_t swi_queue;
28619- atomic_t swi_emitted;
28620+ atomic_unchecked_t swi_emitted;
28621 int vblank_crtc;
28622 uint32_t irq_enable_reg;
28623 uint32_t r500_disp_irq_reg;
28624diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28625index 76ec0e9..6feb1a3 100644
28626--- a/drivers/gpu/drm/radeon/radeon_fence.c
28627+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28628@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28629 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28630 return 0;
28631 }
28632- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28633+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28634 if (!rdev->cp.ready)
28635 /* FIXME: cp is not running assume everythings is done right
28636 * away
28637@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28638 return r;
28639 }
28640 radeon_fence_write(rdev, 0);
28641- atomic_set(&rdev->fence_drv.seq, 0);
28642+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28643 INIT_LIST_HEAD(&rdev->fence_drv.created);
28644 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28645 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28646diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28647index 48b7cea..342236f 100644
28648--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28649+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28650@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28651 request = compat_alloc_user_space(sizeof(*request));
28652 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28653 || __put_user(req32.param, &request->param)
28654- || __put_user((void __user *)(unsigned long)req32.value,
28655+ || __put_user((unsigned long)req32.value,
28656 &request->value))
28657 return -EFAULT;
28658
28659diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28660index 00da384..32f972d 100644
28661--- a/drivers/gpu/drm/radeon/radeon_irq.c
28662+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28663@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28664 unsigned int ret;
28665 RING_LOCALS;
28666
28667- atomic_inc(&dev_priv->swi_emitted);
28668- ret = atomic_read(&dev_priv->swi_emitted);
28669+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28670+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28671
28672 BEGIN_RING(4);
28673 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28674@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28675 drm_radeon_private_t *dev_priv =
28676 (drm_radeon_private_t *) dev->dev_private;
28677
28678- atomic_set(&dev_priv->swi_emitted, 0);
28679+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28680 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28681
28682 dev->max_vblank_count = 0x001fffff;
28683diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28684index e8422ae..d22d4a8 100644
28685--- a/drivers/gpu/drm/radeon/radeon_state.c
28686+++ b/drivers/gpu/drm/radeon/radeon_state.c
28687@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28688 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28689 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28690
28691- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28692+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28693 sarea_priv->nbox * sizeof(depth_boxes[0])))
28694 return -EFAULT;
28695
28696@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28697 {
28698 drm_radeon_private_t *dev_priv = dev->dev_private;
28699 drm_radeon_getparam_t *param = data;
28700- int value;
28701+ int value = 0;
28702
28703 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28704
28705diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28706index 0b5468b..9c4b308 100644
28707--- a/drivers/gpu/drm/radeon/radeon_ttm.c
28708+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28709@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28710 }
28711 if (unlikely(ttm_vm_ops == NULL)) {
28712 ttm_vm_ops = vma->vm_ops;
28713- radeon_ttm_vm_ops = *ttm_vm_ops;
28714- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28715+ pax_open_kernel();
28716+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28717+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28718+ pax_close_kernel();
28719 }
28720 vma->vm_ops = &radeon_ttm_vm_ops;
28721 return 0;
28722diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28723index a9049ed..501f284 100644
28724--- a/drivers/gpu/drm/radeon/rs690.c
28725+++ b/drivers/gpu/drm/radeon/rs690.c
28726@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28727 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28728 rdev->pm.sideport_bandwidth.full)
28729 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28730- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28731+ read_delay_latency.full = dfixed_const(800 * 1000);
28732 read_delay_latency.full = dfixed_div(read_delay_latency,
28733 rdev->pm.igp_sideport_mclk);
28734+ a.full = dfixed_const(370);
28735+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28736 } else {
28737 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28738 rdev->pm.k8_bandwidth.full)
28739diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28740index 727e93d..1565650 100644
28741--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28742+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28743@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28744 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28745 struct shrink_control *sc)
28746 {
28747- static atomic_t start_pool = ATOMIC_INIT(0);
28748+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28749 unsigned i;
28750- unsigned pool_offset = atomic_add_return(1, &start_pool);
28751+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28752 struct ttm_page_pool *pool;
28753 int shrink_pages = sc->nr_to_scan;
28754
28755diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28756index 9cf87d9..2000b7d 100644
28757--- a/drivers/gpu/drm/via/via_drv.h
28758+++ b/drivers/gpu/drm/via/via_drv.h
28759@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28760 typedef uint32_t maskarray_t[5];
28761
28762 typedef struct drm_via_irq {
28763- atomic_t irq_received;
28764+ atomic_unchecked_t irq_received;
28765 uint32_t pending_mask;
28766 uint32_t enable_mask;
28767 wait_queue_head_t irq_queue;
28768@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28769 struct timeval last_vblank;
28770 int last_vblank_valid;
28771 unsigned usec_per_vblank;
28772- atomic_t vbl_received;
28773+ atomic_unchecked_t vbl_received;
28774 drm_via_state_t hc_state;
28775 char pci_buf[VIA_PCI_BUF_SIZE];
28776 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28777diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28778index d391f48..10c8ca3 100644
28779--- a/drivers/gpu/drm/via/via_irq.c
28780+++ b/drivers/gpu/drm/via/via_irq.c
28781@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28782 if (crtc != 0)
28783 return 0;
28784
28785- return atomic_read(&dev_priv->vbl_received);
28786+ return atomic_read_unchecked(&dev_priv->vbl_received);
28787 }
28788
28789 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28790@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28791
28792 status = VIA_READ(VIA_REG_INTERRUPT);
28793 if (status & VIA_IRQ_VBLANK_PENDING) {
28794- atomic_inc(&dev_priv->vbl_received);
28795- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28796+ atomic_inc_unchecked(&dev_priv->vbl_received);
28797+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28798 do_gettimeofday(&cur_vblank);
28799 if (dev_priv->last_vblank_valid) {
28800 dev_priv->usec_per_vblank =
28801@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28802 dev_priv->last_vblank = cur_vblank;
28803 dev_priv->last_vblank_valid = 1;
28804 }
28805- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28806+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28807 DRM_DEBUG("US per vblank is: %u\n",
28808 dev_priv->usec_per_vblank);
28809 }
28810@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28811
28812 for (i = 0; i < dev_priv->num_irqs; ++i) {
28813 if (status & cur_irq->pending_mask) {
28814- atomic_inc(&cur_irq->irq_received);
28815+ atomic_inc_unchecked(&cur_irq->irq_received);
28816 DRM_WAKEUP(&cur_irq->irq_queue);
28817 handled = 1;
28818 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28819@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28820 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28821 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28822 masks[irq][4]));
28823- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28824+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28825 } else {
28826 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28827 (((cur_irq_sequence =
28828- atomic_read(&cur_irq->irq_received)) -
28829+ atomic_read_unchecked(&cur_irq->irq_received)) -
28830 *sequence) <= (1 << 23)));
28831 }
28832 *sequence = cur_irq_sequence;
28833@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28834 }
28835
28836 for (i = 0; i < dev_priv->num_irqs; ++i) {
28837- atomic_set(&cur_irq->irq_received, 0);
28838+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28839 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28840 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28841 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28842@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28843 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28844 case VIA_IRQ_RELATIVE:
28845 irqwait->request.sequence +=
28846- atomic_read(&cur_irq->irq_received);
28847+ atomic_read_unchecked(&cur_irq->irq_received);
28848 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28849 case VIA_IRQ_ABSOLUTE:
28850 break;
28851diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28852index dc27970..f18b008 100644
28853--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28854+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28855@@ -260,7 +260,7 @@ struct vmw_private {
28856 * Fencing and IRQs.
28857 */
28858
28859- atomic_t marker_seq;
28860+ atomic_unchecked_t marker_seq;
28861 wait_queue_head_t fence_queue;
28862 wait_queue_head_t fifo_queue;
28863 int fence_queue_waiters; /* Protected by hw_mutex */
28864diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28865index a0c2f12..68ae6cb 100644
28866--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28867+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28868@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28869 (unsigned int) min,
28870 (unsigned int) fifo->capabilities);
28871
28872- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28873+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28874 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28875 vmw_marker_queue_init(&fifo->marker_queue);
28876 return vmw_fifo_send_fence(dev_priv, &dummy);
28877@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28878 if (reserveable)
28879 iowrite32(bytes, fifo_mem +
28880 SVGA_FIFO_RESERVED);
28881- return fifo_mem + (next_cmd >> 2);
28882+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
28883 } else {
28884 need_bounce = true;
28885 }
28886@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28887
28888 fm = vmw_fifo_reserve(dev_priv, bytes);
28889 if (unlikely(fm == NULL)) {
28890- *seqno = atomic_read(&dev_priv->marker_seq);
28891+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28892 ret = -ENOMEM;
28893 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
28894 false, 3*HZ);
28895@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28896 }
28897
28898 do {
28899- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
28900+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
28901 } while (*seqno == 0);
28902
28903 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
28904diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28905index cabc95f..14b3d77 100644
28906--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28907+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28908@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
28909 * emitted. Then the fence is stale and signaled.
28910 */
28911
28912- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
28913+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
28914 > VMW_FENCE_WRAP);
28915
28916 return ret;
28917@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
28918
28919 if (fifo_idle)
28920 down_read(&fifo_state->rwsem);
28921- signal_seq = atomic_read(&dev_priv->marker_seq);
28922+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
28923 ret = 0;
28924
28925 for (;;) {
28926diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28927index 8a8725c..afed796 100644
28928--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28929+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28930@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
28931 while (!vmw_lag_lt(queue, us)) {
28932 spin_lock(&queue->lock);
28933 if (list_empty(&queue->head))
28934- seqno = atomic_read(&dev_priv->marker_seq);
28935+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28936 else {
28937 marker = list_first_entry(&queue->head,
28938 struct vmw_marker, head);
28939diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
28940index bb656d8..4169fca 100644
28941--- a/drivers/hid/hid-core.c
28942+++ b/drivers/hid/hid-core.c
28943@@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
28944
28945 int hid_add_device(struct hid_device *hdev)
28946 {
28947- static atomic_t id = ATOMIC_INIT(0);
28948+ static atomic_unchecked_t id = ATOMIC_INIT(0);
28949 int ret;
28950
28951 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28952@@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
28953 /* XXX hack, any other cleaner solution after the driver core
28954 * is converted to allow more than 20 bytes as the device name? */
28955 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28956- hdev->vendor, hdev->product, atomic_inc_return(&id));
28957+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28958
28959 hid_debug_register(hdev, dev_name(&hdev->dev));
28960 ret = device_add(&hdev->dev);
28961diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
28962index 4ef02b2..8a96831 100644
28963--- a/drivers/hid/usbhid/hiddev.c
28964+++ b/drivers/hid/usbhid/hiddev.c
28965@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
28966 break;
28967
28968 case HIDIOCAPPLICATION:
28969- if (arg < 0 || arg >= hid->maxapplication)
28970+ if (arg >= hid->maxapplication)
28971 break;
28972
28973 for (i = 0; i < hid->maxcollection; i++)
28974diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
28975index 4065374..10ed7dc 100644
28976--- a/drivers/hv/channel.c
28977+++ b/drivers/hv/channel.c
28978@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
28979 int ret = 0;
28980 int t;
28981
28982- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
28983- atomic_inc(&vmbus_connection.next_gpadl_handle);
28984+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
28985+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
28986
28987 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
28988 if (ret)
28989diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
28990index 0fb100e..baf87e5 100644
28991--- a/drivers/hv/hv.c
28992+++ b/drivers/hv/hv.c
28993@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
28994 u64 output_address = (output) ? virt_to_phys(output) : 0;
28995 u32 output_address_hi = output_address >> 32;
28996 u32 output_address_lo = output_address & 0xFFFFFFFF;
28997- void *hypercall_page = hv_context.hypercall_page;
28998+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
28999
29000 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29001 "=a"(hv_status_lo) : "d" (control_hi),
29002diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29003index 0aee112..b72d21f 100644
29004--- a/drivers/hv/hyperv_vmbus.h
29005+++ b/drivers/hv/hyperv_vmbus.h
29006@@ -556,7 +556,7 @@ enum vmbus_connect_state {
29007 struct vmbus_connection {
29008 enum vmbus_connect_state conn_state;
29009
29010- atomic_t next_gpadl_handle;
29011+ atomic_unchecked_t next_gpadl_handle;
29012
29013 /*
29014 * Represents channel interrupts. Each bit position represents a
29015diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29016index d2d0a2a..90b8f4d 100644
29017--- a/drivers/hv/vmbus_drv.c
29018+++ b/drivers/hv/vmbus_drv.c
29019@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29020 {
29021 int ret = 0;
29022
29023- static atomic_t device_num = ATOMIC_INIT(0);
29024+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29025
29026 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29027- atomic_inc_return(&device_num));
29028+ atomic_inc_return_unchecked(&device_num));
29029
29030 child_device_obj->device.bus = &hv_bus;
29031 child_device_obj->device.parent = &hv_acpi_dev->dev;
29032diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29033index 66f6729..2d6de0a 100644
29034--- a/drivers/hwmon/acpi_power_meter.c
29035+++ b/drivers/hwmon/acpi_power_meter.c
29036@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29037 return res;
29038
29039 temp /= 1000;
29040- if (temp < 0)
29041- return -EINVAL;
29042
29043 mutex_lock(&resource->lock);
29044 resource->trip[attr->index - 7] = temp;
29045diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29046index fe4104c..346febb 100644
29047--- a/drivers/hwmon/sht15.c
29048+++ b/drivers/hwmon/sht15.c
29049@@ -166,7 +166,7 @@ struct sht15_data {
29050 int supply_uV;
29051 bool supply_uV_valid;
29052 struct work_struct update_supply_work;
29053- atomic_t interrupt_handled;
29054+ atomic_unchecked_t interrupt_handled;
29055 };
29056
29057 /**
29058@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29059 return ret;
29060
29061 gpio_direction_input(data->pdata->gpio_data);
29062- atomic_set(&data->interrupt_handled, 0);
29063+ atomic_set_unchecked(&data->interrupt_handled, 0);
29064
29065 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29066 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29067 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29068 /* Only relevant if the interrupt hasn't occurred. */
29069- if (!atomic_read(&data->interrupt_handled))
29070+ if (!atomic_read_unchecked(&data->interrupt_handled))
29071 schedule_work(&data->read_work);
29072 }
29073 ret = wait_event_timeout(data->wait_queue,
29074@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29075
29076 /* First disable the interrupt */
29077 disable_irq_nosync(irq);
29078- atomic_inc(&data->interrupt_handled);
29079+ atomic_inc_unchecked(&data->interrupt_handled);
29080 /* Then schedule a reading work struct */
29081 if (data->state != SHT15_READING_NOTHING)
29082 schedule_work(&data->read_work);
29083@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29084 * If not, then start the interrupt again - care here as could
29085 * have gone low in meantime so verify it hasn't!
29086 */
29087- atomic_set(&data->interrupt_handled, 0);
29088+ atomic_set_unchecked(&data->interrupt_handled, 0);
29089 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29090 /* If still not occurred or another handler has been scheduled */
29091 if (gpio_get_value(data->pdata->gpio_data)
29092- || atomic_read(&data->interrupt_handled))
29093+ || atomic_read_unchecked(&data->interrupt_handled))
29094 return;
29095 }
29096
29097diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29098index 378fcb5..5e91fa8 100644
29099--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29100+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29101@@ -43,7 +43,7 @@
29102 extern struct i2c_adapter amd756_smbus;
29103
29104 static struct i2c_adapter *s4882_adapter;
29105-static struct i2c_algorithm *s4882_algo;
29106+static i2c_algorithm_no_const *s4882_algo;
29107
29108 /* Wrapper access functions for multiplexed SMBus */
29109 static DEFINE_MUTEX(amd756_lock);
29110diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29111index 29015eb..af2d8e9 100644
29112--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29113+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29114@@ -41,7 +41,7 @@
29115 extern struct i2c_adapter *nforce2_smbus;
29116
29117 static struct i2c_adapter *s4985_adapter;
29118-static struct i2c_algorithm *s4985_algo;
29119+static i2c_algorithm_no_const *s4985_algo;
29120
29121 /* Wrapper access functions for multiplexed SMBus */
29122 static DEFINE_MUTEX(nforce2_lock);
29123diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29124index d7a4833..7fae376 100644
29125--- a/drivers/i2c/i2c-mux.c
29126+++ b/drivers/i2c/i2c-mux.c
29127@@ -28,7 +28,7 @@
29128 /* multiplexer per channel data */
29129 struct i2c_mux_priv {
29130 struct i2c_adapter adap;
29131- struct i2c_algorithm algo;
29132+ i2c_algorithm_no_const algo;
29133
29134 struct i2c_adapter *parent;
29135 void *mux_dev; /* the mux chip/device */
29136diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29137index 57d00ca..0145194 100644
29138--- a/drivers/ide/aec62xx.c
29139+++ b/drivers/ide/aec62xx.c
29140@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29141 .cable_detect = atp86x_cable_detect,
29142 };
29143
29144-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29145+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29146 { /* 0: AEC6210 */
29147 .name = DRV_NAME,
29148 .init_chipset = init_chipset_aec62xx,
29149diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29150index 2c8016a..911a27c 100644
29151--- a/drivers/ide/alim15x3.c
29152+++ b/drivers/ide/alim15x3.c
29153@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29154 .dma_sff_read_status = ide_dma_sff_read_status,
29155 };
29156
29157-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29158+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29159 .name = DRV_NAME,
29160 .init_chipset = init_chipset_ali15x3,
29161 .init_hwif = init_hwif_ali15x3,
29162diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29163index 3747b25..56fc995 100644
29164--- a/drivers/ide/amd74xx.c
29165+++ b/drivers/ide/amd74xx.c
29166@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29167 .udma_mask = udma, \
29168 }
29169
29170-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29171+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29172 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29173 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29174 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29175diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29176index 15f0ead..cb43480 100644
29177--- a/drivers/ide/atiixp.c
29178+++ b/drivers/ide/atiixp.c
29179@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29180 .cable_detect = atiixp_cable_detect,
29181 };
29182
29183-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29184+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29185 { /* 0: IXP200/300/400/700 */
29186 .name = DRV_NAME,
29187 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29188diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29189index 5f80312..d1fc438 100644
29190--- a/drivers/ide/cmd64x.c
29191+++ b/drivers/ide/cmd64x.c
29192@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29193 .dma_sff_read_status = ide_dma_sff_read_status,
29194 };
29195
29196-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29197+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29198 { /* 0: CMD643 */
29199 .name = DRV_NAME,
29200 .init_chipset = init_chipset_cmd64x,
29201diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29202index 2c1e5f7..1444762 100644
29203--- a/drivers/ide/cs5520.c
29204+++ b/drivers/ide/cs5520.c
29205@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29206 .set_dma_mode = cs5520_set_dma_mode,
29207 };
29208
29209-static const struct ide_port_info cyrix_chipset __devinitdata = {
29210+static const struct ide_port_info cyrix_chipset __devinitconst = {
29211 .name = DRV_NAME,
29212 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29213 .port_ops = &cs5520_port_ops,
29214diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29215index 4dc4eb9..49b40ad 100644
29216--- a/drivers/ide/cs5530.c
29217+++ b/drivers/ide/cs5530.c
29218@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29219 .udma_filter = cs5530_udma_filter,
29220 };
29221
29222-static const struct ide_port_info cs5530_chipset __devinitdata = {
29223+static const struct ide_port_info cs5530_chipset __devinitconst = {
29224 .name = DRV_NAME,
29225 .init_chipset = init_chipset_cs5530,
29226 .init_hwif = init_hwif_cs5530,
29227diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29228index 5059faf..18d4c85 100644
29229--- a/drivers/ide/cs5535.c
29230+++ b/drivers/ide/cs5535.c
29231@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29232 .cable_detect = cs5535_cable_detect,
29233 };
29234
29235-static const struct ide_port_info cs5535_chipset __devinitdata = {
29236+static const struct ide_port_info cs5535_chipset __devinitconst = {
29237 .name = DRV_NAME,
29238 .port_ops = &cs5535_port_ops,
29239 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29240diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29241index 847553f..3ffb49d 100644
29242--- a/drivers/ide/cy82c693.c
29243+++ b/drivers/ide/cy82c693.c
29244@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29245 .set_dma_mode = cy82c693_set_dma_mode,
29246 };
29247
29248-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29249+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29250 .name = DRV_NAME,
29251 .init_iops = init_iops_cy82c693,
29252 .port_ops = &cy82c693_port_ops,
29253diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29254index 58c51cd..4aec3b8 100644
29255--- a/drivers/ide/hpt366.c
29256+++ b/drivers/ide/hpt366.c
29257@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29258 }
29259 };
29260
29261-static const struct hpt_info hpt36x __devinitdata = {
29262+static const struct hpt_info hpt36x __devinitconst = {
29263 .chip_name = "HPT36x",
29264 .chip_type = HPT36x,
29265 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29266@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29267 .timings = &hpt36x_timings
29268 };
29269
29270-static const struct hpt_info hpt370 __devinitdata = {
29271+static const struct hpt_info hpt370 __devinitconst = {
29272 .chip_name = "HPT370",
29273 .chip_type = HPT370,
29274 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29275@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29276 .timings = &hpt37x_timings
29277 };
29278
29279-static const struct hpt_info hpt370a __devinitdata = {
29280+static const struct hpt_info hpt370a __devinitconst = {
29281 .chip_name = "HPT370A",
29282 .chip_type = HPT370A,
29283 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29284@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29285 .timings = &hpt37x_timings
29286 };
29287
29288-static const struct hpt_info hpt374 __devinitdata = {
29289+static const struct hpt_info hpt374 __devinitconst = {
29290 .chip_name = "HPT374",
29291 .chip_type = HPT374,
29292 .udma_mask = ATA_UDMA5,
29293@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29294 .timings = &hpt37x_timings
29295 };
29296
29297-static const struct hpt_info hpt372 __devinitdata = {
29298+static const struct hpt_info hpt372 __devinitconst = {
29299 .chip_name = "HPT372",
29300 .chip_type = HPT372,
29301 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29302@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29303 .timings = &hpt37x_timings
29304 };
29305
29306-static const struct hpt_info hpt372a __devinitdata = {
29307+static const struct hpt_info hpt372a __devinitconst = {
29308 .chip_name = "HPT372A",
29309 .chip_type = HPT372A,
29310 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29311@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29312 .timings = &hpt37x_timings
29313 };
29314
29315-static const struct hpt_info hpt302 __devinitdata = {
29316+static const struct hpt_info hpt302 __devinitconst = {
29317 .chip_name = "HPT302",
29318 .chip_type = HPT302,
29319 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29320@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29321 .timings = &hpt37x_timings
29322 };
29323
29324-static const struct hpt_info hpt371 __devinitdata = {
29325+static const struct hpt_info hpt371 __devinitconst = {
29326 .chip_name = "HPT371",
29327 .chip_type = HPT371,
29328 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29329@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29330 .timings = &hpt37x_timings
29331 };
29332
29333-static const struct hpt_info hpt372n __devinitdata = {
29334+static const struct hpt_info hpt372n __devinitconst = {
29335 .chip_name = "HPT372N",
29336 .chip_type = HPT372N,
29337 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29338@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29339 .timings = &hpt37x_timings
29340 };
29341
29342-static const struct hpt_info hpt302n __devinitdata = {
29343+static const struct hpt_info hpt302n __devinitconst = {
29344 .chip_name = "HPT302N",
29345 .chip_type = HPT302N,
29346 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29347@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29348 .timings = &hpt37x_timings
29349 };
29350
29351-static const struct hpt_info hpt371n __devinitdata = {
29352+static const struct hpt_info hpt371n __devinitconst = {
29353 .chip_name = "HPT371N",
29354 .chip_type = HPT371N,
29355 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29356@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29357 .dma_sff_read_status = ide_dma_sff_read_status,
29358 };
29359
29360-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29361+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29362 { /* 0: HPT36x */
29363 .name = DRV_NAME,
29364 .init_chipset = init_chipset_hpt366,
29365diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29366index 8126824..55a2798 100644
29367--- a/drivers/ide/ide-cd.c
29368+++ b/drivers/ide/ide-cd.c
29369@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29370 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29371 if ((unsigned long)buf & alignment
29372 || blk_rq_bytes(rq) & q->dma_pad_mask
29373- || object_is_on_stack(buf))
29374+ || object_starts_on_stack(buf))
29375 drive->dma = 0;
29376 }
29377 }
29378diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29379index a743e68..1cfd674 100644
29380--- a/drivers/ide/ide-pci-generic.c
29381+++ b/drivers/ide/ide-pci-generic.c
29382@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29383 .udma_mask = ATA_UDMA6, \
29384 }
29385
29386-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29387+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29388 /* 0: Unknown */
29389 DECLARE_GENERIC_PCI_DEV(0),
29390
29391diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29392index 560e66d..d5dd180 100644
29393--- a/drivers/ide/it8172.c
29394+++ b/drivers/ide/it8172.c
29395@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29396 .set_dma_mode = it8172_set_dma_mode,
29397 };
29398
29399-static const struct ide_port_info it8172_port_info __devinitdata = {
29400+static const struct ide_port_info it8172_port_info __devinitconst = {
29401 .name = DRV_NAME,
29402 .port_ops = &it8172_port_ops,
29403 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29404diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29405index 46816ba..1847aeb 100644
29406--- a/drivers/ide/it8213.c
29407+++ b/drivers/ide/it8213.c
29408@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29409 .cable_detect = it8213_cable_detect,
29410 };
29411
29412-static const struct ide_port_info it8213_chipset __devinitdata = {
29413+static const struct ide_port_info it8213_chipset __devinitconst = {
29414 .name = DRV_NAME,
29415 .enablebits = { {0x41, 0x80, 0x80} },
29416 .port_ops = &it8213_port_ops,
29417diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29418index 2e3169f..c5611db 100644
29419--- a/drivers/ide/it821x.c
29420+++ b/drivers/ide/it821x.c
29421@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29422 .cable_detect = it821x_cable_detect,
29423 };
29424
29425-static const struct ide_port_info it821x_chipset __devinitdata = {
29426+static const struct ide_port_info it821x_chipset __devinitconst = {
29427 .name = DRV_NAME,
29428 .init_chipset = init_chipset_it821x,
29429 .init_hwif = init_hwif_it821x,
29430diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29431index 74c2c4a..efddd7d 100644
29432--- a/drivers/ide/jmicron.c
29433+++ b/drivers/ide/jmicron.c
29434@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29435 .cable_detect = jmicron_cable_detect,
29436 };
29437
29438-static const struct ide_port_info jmicron_chipset __devinitdata = {
29439+static const struct ide_port_info jmicron_chipset __devinitconst = {
29440 .name = DRV_NAME,
29441 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29442 .port_ops = &jmicron_port_ops,
29443diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29444index 95327a2..73f78d8 100644
29445--- a/drivers/ide/ns87415.c
29446+++ b/drivers/ide/ns87415.c
29447@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29448 .dma_sff_read_status = superio_dma_sff_read_status,
29449 };
29450
29451-static const struct ide_port_info ns87415_chipset __devinitdata = {
29452+static const struct ide_port_info ns87415_chipset __devinitconst = {
29453 .name = DRV_NAME,
29454 .init_hwif = init_hwif_ns87415,
29455 .tp_ops = &ns87415_tp_ops,
29456diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29457index 1a53a4c..39edc66 100644
29458--- a/drivers/ide/opti621.c
29459+++ b/drivers/ide/opti621.c
29460@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29461 .set_pio_mode = opti621_set_pio_mode,
29462 };
29463
29464-static const struct ide_port_info opti621_chipset __devinitdata = {
29465+static const struct ide_port_info opti621_chipset __devinitconst = {
29466 .name = DRV_NAME,
29467 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29468 .port_ops = &opti621_port_ops,
29469diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29470index 9546fe2..2e5ceb6 100644
29471--- a/drivers/ide/pdc202xx_new.c
29472+++ b/drivers/ide/pdc202xx_new.c
29473@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29474 .udma_mask = udma, \
29475 }
29476
29477-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29478+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29479 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29480 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29481 };
29482diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29483index 3a35ec6..5634510 100644
29484--- a/drivers/ide/pdc202xx_old.c
29485+++ b/drivers/ide/pdc202xx_old.c
29486@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29487 .max_sectors = sectors, \
29488 }
29489
29490-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29491+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29492 { /* 0: PDC20246 */
29493 .name = DRV_NAME,
29494 .init_chipset = init_chipset_pdc202xx,
29495diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29496index 1892e81..fe0fd60 100644
29497--- a/drivers/ide/piix.c
29498+++ b/drivers/ide/piix.c
29499@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29500 .udma_mask = udma, \
29501 }
29502
29503-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29504+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29505 /* 0: MPIIX */
29506 { /*
29507 * MPIIX actually has only a single IDE channel mapped to
29508diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29509index a6414a8..c04173e 100644
29510--- a/drivers/ide/rz1000.c
29511+++ b/drivers/ide/rz1000.c
29512@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29513 }
29514 }
29515
29516-static const struct ide_port_info rz1000_chipset __devinitdata = {
29517+static const struct ide_port_info rz1000_chipset __devinitconst = {
29518 .name = DRV_NAME,
29519 .host_flags = IDE_HFLAG_NO_DMA,
29520 };
29521diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29522index 356b9b5..d4758eb 100644
29523--- a/drivers/ide/sc1200.c
29524+++ b/drivers/ide/sc1200.c
29525@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29526 .dma_sff_read_status = ide_dma_sff_read_status,
29527 };
29528
29529-static const struct ide_port_info sc1200_chipset __devinitdata = {
29530+static const struct ide_port_info sc1200_chipset __devinitconst = {
29531 .name = DRV_NAME,
29532 .port_ops = &sc1200_port_ops,
29533 .dma_ops = &sc1200_dma_ops,
29534diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29535index b7f5b0c..9701038 100644
29536--- a/drivers/ide/scc_pata.c
29537+++ b/drivers/ide/scc_pata.c
29538@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29539 .dma_sff_read_status = scc_dma_sff_read_status,
29540 };
29541
29542-static const struct ide_port_info scc_chipset __devinitdata = {
29543+static const struct ide_port_info scc_chipset __devinitconst = {
29544 .name = "sccIDE",
29545 .init_iops = init_iops_scc,
29546 .init_dma = scc_init_dma,
29547diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29548index 35fb8da..24d72ef 100644
29549--- a/drivers/ide/serverworks.c
29550+++ b/drivers/ide/serverworks.c
29551@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29552 .cable_detect = svwks_cable_detect,
29553 };
29554
29555-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29556+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29557 { /* 0: OSB4 */
29558 .name = DRV_NAME,
29559 .init_chipset = init_chipset_svwks,
29560diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29561index ddeda44..46f7e30 100644
29562--- a/drivers/ide/siimage.c
29563+++ b/drivers/ide/siimage.c
29564@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29565 .udma_mask = ATA_UDMA6, \
29566 }
29567
29568-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29569+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29570 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29571 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29572 };
29573diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29574index 4a00225..09e61b4 100644
29575--- a/drivers/ide/sis5513.c
29576+++ b/drivers/ide/sis5513.c
29577@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29578 .cable_detect = sis_cable_detect,
29579 };
29580
29581-static const struct ide_port_info sis5513_chipset __devinitdata = {
29582+static const struct ide_port_info sis5513_chipset __devinitconst = {
29583 .name = DRV_NAME,
29584 .init_chipset = init_chipset_sis5513,
29585 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29586diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29587index f21dc2a..d051cd2 100644
29588--- a/drivers/ide/sl82c105.c
29589+++ b/drivers/ide/sl82c105.c
29590@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29591 .dma_sff_read_status = ide_dma_sff_read_status,
29592 };
29593
29594-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29595+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29596 .name = DRV_NAME,
29597 .init_chipset = init_chipset_sl82c105,
29598 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29599diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29600index 864ffe0..863a5e9 100644
29601--- a/drivers/ide/slc90e66.c
29602+++ b/drivers/ide/slc90e66.c
29603@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29604 .cable_detect = slc90e66_cable_detect,
29605 };
29606
29607-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29608+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29609 .name = DRV_NAME,
29610 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29611 .port_ops = &slc90e66_port_ops,
29612diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29613index 4799d5c..1794678 100644
29614--- a/drivers/ide/tc86c001.c
29615+++ b/drivers/ide/tc86c001.c
29616@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29617 .dma_sff_read_status = ide_dma_sff_read_status,
29618 };
29619
29620-static const struct ide_port_info tc86c001_chipset __devinitdata = {
29621+static const struct ide_port_info tc86c001_chipset __devinitconst = {
29622 .name = DRV_NAME,
29623 .init_hwif = init_hwif_tc86c001,
29624 .port_ops = &tc86c001_port_ops,
29625diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29626index 281c914..55ce1b8 100644
29627--- a/drivers/ide/triflex.c
29628+++ b/drivers/ide/triflex.c
29629@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29630 .set_dma_mode = triflex_set_mode,
29631 };
29632
29633-static const struct ide_port_info triflex_device __devinitdata = {
29634+static const struct ide_port_info triflex_device __devinitconst = {
29635 .name = DRV_NAME,
29636 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29637 .port_ops = &triflex_port_ops,
29638diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29639index 4b42ca0..e494a98 100644
29640--- a/drivers/ide/trm290.c
29641+++ b/drivers/ide/trm290.c
29642@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29643 .dma_check = trm290_dma_check,
29644 };
29645
29646-static const struct ide_port_info trm290_chipset __devinitdata = {
29647+static const struct ide_port_info trm290_chipset __devinitconst = {
29648 .name = DRV_NAME,
29649 .init_hwif = init_hwif_trm290,
29650 .tp_ops = &trm290_tp_ops,
29651diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29652index f46f49c..eb77678 100644
29653--- a/drivers/ide/via82cxxx.c
29654+++ b/drivers/ide/via82cxxx.c
29655@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29656 .cable_detect = via82cxxx_cable_detect,
29657 };
29658
29659-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29660+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29661 .name = DRV_NAME,
29662 .init_chipset = init_chipset_via82cxxx,
29663 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29664diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29665index eb0e2cc..14241c7 100644
29666--- a/drivers/ieee802154/fakehard.c
29667+++ b/drivers/ieee802154/fakehard.c
29668@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29669 phy->transmit_power = 0xbf;
29670
29671 dev->netdev_ops = &fake_ops;
29672- dev->ml_priv = &fake_mlme;
29673+ dev->ml_priv = (void *)&fake_mlme;
29674
29675 priv = netdev_priv(dev);
29676 priv->phy = phy;
29677diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29678index 8b72f39..55df4c8 100644
29679--- a/drivers/infiniband/core/cm.c
29680+++ b/drivers/infiniband/core/cm.c
29681@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29682
29683 struct cm_counter_group {
29684 struct kobject obj;
29685- atomic_long_t counter[CM_ATTR_COUNT];
29686+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29687 };
29688
29689 struct cm_counter_attribute {
29690@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29691 struct ib_mad_send_buf *msg = NULL;
29692 int ret;
29693
29694- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29695+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29696 counter[CM_REQ_COUNTER]);
29697
29698 /* Quick state check to discard duplicate REQs. */
29699@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29700 if (!cm_id_priv)
29701 return;
29702
29703- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29704+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29705 counter[CM_REP_COUNTER]);
29706 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29707 if (ret)
29708@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29709 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29710 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29711 spin_unlock_irq(&cm_id_priv->lock);
29712- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29713+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29714 counter[CM_RTU_COUNTER]);
29715 goto out;
29716 }
29717@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29718 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29719 dreq_msg->local_comm_id);
29720 if (!cm_id_priv) {
29721- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29722+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29723 counter[CM_DREQ_COUNTER]);
29724 cm_issue_drep(work->port, work->mad_recv_wc);
29725 return -EINVAL;
29726@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29727 case IB_CM_MRA_REP_RCVD:
29728 break;
29729 case IB_CM_TIMEWAIT:
29730- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29731+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29732 counter[CM_DREQ_COUNTER]);
29733 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29734 goto unlock;
29735@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29736 cm_free_msg(msg);
29737 goto deref;
29738 case IB_CM_DREQ_RCVD:
29739- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29740+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29741 counter[CM_DREQ_COUNTER]);
29742 goto unlock;
29743 default:
29744@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29745 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29746 cm_id_priv->msg, timeout)) {
29747 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29748- atomic_long_inc(&work->port->
29749+ atomic_long_inc_unchecked(&work->port->
29750 counter_group[CM_RECV_DUPLICATES].
29751 counter[CM_MRA_COUNTER]);
29752 goto out;
29753@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29754 break;
29755 case IB_CM_MRA_REQ_RCVD:
29756 case IB_CM_MRA_REP_RCVD:
29757- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29758+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29759 counter[CM_MRA_COUNTER]);
29760 /* fall through */
29761 default:
29762@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29763 case IB_CM_LAP_IDLE:
29764 break;
29765 case IB_CM_MRA_LAP_SENT:
29766- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29767+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29768 counter[CM_LAP_COUNTER]);
29769 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29770 goto unlock;
29771@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29772 cm_free_msg(msg);
29773 goto deref;
29774 case IB_CM_LAP_RCVD:
29775- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29776+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29777 counter[CM_LAP_COUNTER]);
29778 goto unlock;
29779 default:
29780@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29781 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29782 if (cur_cm_id_priv) {
29783 spin_unlock_irq(&cm.lock);
29784- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29785+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29786 counter[CM_SIDR_REQ_COUNTER]);
29787 goto out; /* Duplicate message. */
29788 }
29789@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29790 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29791 msg->retries = 1;
29792
29793- atomic_long_add(1 + msg->retries,
29794+ atomic_long_add_unchecked(1 + msg->retries,
29795 &port->counter_group[CM_XMIT].counter[attr_index]);
29796 if (msg->retries)
29797- atomic_long_add(msg->retries,
29798+ atomic_long_add_unchecked(msg->retries,
29799 &port->counter_group[CM_XMIT_RETRIES].
29800 counter[attr_index]);
29801
29802@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29803 }
29804
29805 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29806- atomic_long_inc(&port->counter_group[CM_RECV].
29807+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29808 counter[attr_id - CM_ATTR_ID_OFFSET]);
29809
29810 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29811@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29812 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29813
29814 return sprintf(buf, "%ld\n",
29815- atomic_long_read(&group->counter[cm_attr->index]));
29816+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29817 }
29818
29819 static const struct sysfs_ops cm_counter_ops = {
29820diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29821index 176c8f9..2627b62 100644
29822--- a/drivers/infiniband/core/fmr_pool.c
29823+++ b/drivers/infiniband/core/fmr_pool.c
29824@@ -98,8 +98,8 @@ struct ib_fmr_pool {
29825
29826 struct task_struct *thread;
29827
29828- atomic_t req_ser;
29829- atomic_t flush_ser;
29830+ atomic_unchecked_t req_ser;
29831+ atomic_unchecked_t flush_ser;
29832
29833 wait_queue_head_t force_wait;
29834 };
29835@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29836 struct ib_fmr_pool *pool = pool_ptr;
29837
29838 do {
29839- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29840+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29841 ib_fmr_batch_release(pool);
29842
29843- atomic_inc(&pool->flush_ser);
29844+ atomic_inc_unchecked(&pool->flush_ser);
29845 wake_up_interruptible(&pool->force_wait);
29846
29847 if (pool->flush_function)
29848@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29849 }
29850
29851 set_current_state(TASK_INTERRUPTIBLE);
29852- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29853+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29854 !kthread_should_stop())
29855 schedule();
29856 __set_current_state(TASK_RUNNING);
29857@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29858 pool->dirty_watermark = params->dirty_watermark;
29859 pool->dirty_len = 0;
29860 spin_lock_init(&pool->pool_lock);
29861- atomic_set(&pool->req_ser, 0);
29862- atomic_set(&pool->flush_ser, 0);
29863+ atomic_set_unchecked(&pool->req_ser, 0);
29864+ atomic_set_unchecked(&pool->flush_ser, 0);
29865 init_waitqueue_head(&pool->force_wait);
29866
29867 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29868@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29869 }
29870 spin_unlock_irq(&pool->pool_lock);
29871
29872- serial = atomic_inc_return(&pool->req_ser);
29873+ serial = atomic_inc_return_unchecked(&pool->req_ser);
29874 wake_up_process(pool->thread);
29875
29876 if (wait_event_interruptible(pool->force_wait,
29877- atomic_read(&pool->flush_ser) - serial >= 0))
29878+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29879 return -EINTR;
29880
29881 return 0;
29882@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
29883 } else {
29884 list_add_tail(&fmr->list, &pool->dirty_list);
29885 if (++pool->dirty_len >= pool->dirty_watermark) {
29886- atomic_inc(&pool->req_ser);
29887+ atomic_inc_unchecked(&pool->req_ser);
29888 wake_up_process(pool->thread);
29889 }
29890 }
29891diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
29892index 40c8353..946b0e4 100644
29893--- a/drivers/infiniband/hw/cxgb4/mem.c
29894+++ b/drivers/infiniband/hw/cxgb4/mem.c
29895@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29896 int err;
29897 struct fw_ri_tpte tpt;
29898 u32 stag_idx;
29899- static atomic_t key;
29900+ static atomic_unchecked_t key;
29901
29902 if (c4iw_fatal_error(rdev))
29903 return -EIO;
29904@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29905 &rdev->resource.tpt_fifo_lock);
29906 if (!stag_idx)
29907 return -ENOMEM;
29908- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
29909+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
29910 }
29911 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
29912 __func__, stag_state, type, pdid, stag_idx);
29913diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
29914index 79b3dbc..96e5fcc 100644
29915--- a/drivers/infiniband/hw/ipath/ipath_rc.c
29916+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
29917@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29918 struct ib_atomic_eth *ateth;
29919 struct ipath_ack_entry *e;
29920 u64 vaddr;
29921- atomic64_t *maddr;
29922+ atomic64_unchecked_t *maddr;
29923 u64 sdata;
29924 u32 rkey;
29925 u8 next;
29926@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29927 IB_ACCESS_REMOTE_ATOMIC)))
29928 goto nack_acc_unlck;
29929 /* Perform atomic OP and save result. */
29930- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29931+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29932 sdata = be64_to_cpu(ateth->swap_data);
29933 e = &qp->s_ack_queue[qp->r_head_ack_queue];
29934 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
29935- (u64) atomic64_add_return(sdata, maddr) - sdata :
29936+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29937 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29938 be64_to_cpu(ateth->compare_data),
29939 sdata);
29940diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
29941index 1f95bba..9530f87 100644
29942--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
29943+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
29944@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
29945 unsigned long flags;
29946 struct ib_wc wc;
29947 u64 sdata;
29948- atomic64_t *maddr;
29949+ atomic64_unchecked_t *maddr;
29950 enum ib_wc_status send_status;
29951
29952 /*
29953@@ -382,11 +382,11 @@ again:
29954 IB_ACCESS_REMOTE_ATOMIC)))
29955 goto acc_err;
29956 /* Perform atomic OP and save result. */
29957- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29958+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29959 sdata = wqe->wr.wr.atomic.compare_add;
29960 *(u64 *) sqp->s_sge.sge.vaddr =
29961 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
29962- (u64) atomic64_add_return(sdata, maddr) - sdata :
29963+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29964 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29965 sdata, wqe->wr.wr.atomic.swap);
29966 goto send_comp;
29967diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
29968index 5965b3d..16817fb 100644
29969--- a/drivers/infiniband/hw/nes/nes.c
29970+++ b/drivers/infiniband/hw/nes/nes.c
29971@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
29972 LIST_HEAD(nes_adapter_list);
29973 static LIST_HEAD(nes_dev_list);
29974
29975-atomic_t qps_destroyed;
29976+atomic_unchecked_t qps_destroyed;
29977
29978 static unsigned int ee_flsh_adapter;
29979 static unsigned int sysfs_nonidx_addr;
29980@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
29981 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
29982 struct nes_adapter *nesadapter = nesdev->nesadapter;
29983
29984- atomic_inc(&qps_destroyed);
29985+ atomic_inc_unchecked(&qps_destroyed);
29986
29987 /* Free the control structures */
29988
29989diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
29990index 568b4f1..5ea3eff 100644
29991--- a/drivers/infiniband/hw/nes/nes.h
29992+++ b/drivers/infiniband/hw/nes/nes.h
29993@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
29994 extern unsigned int wqm_quanta;
29995 extern struct list_head nes_adapter_list;
29996
29997-extern atomic_t cm_connects;
29998-extern atomic_t cm_accepts;
29999-extern atomic_t cm_disconnects;
30000-extern atomic_t cm_closes;
30001-extern atomic_t cm_connecteds;
30002-extern atomic_t cm_connect_reqs;
30003-extern atomic_t cm_rejects;
30004-extern atomic_t mod_qp_timouts;
30005-extern atomic_t qps_created;
30006-extern atomic_t qps_destroyed;
30007-extern atomic_t sw_qps_destroyed;
30008+extern atomic_unchecked_t cm_connects;
30009+extern atomic_unchecked_t cm_accepts;
30010+extern atomic_unchecked_t cm_disconnects;
30011+extern atomic_unchecked_t cm_closes;
30012+extern atomic_unchecked_t cm_connecteds;
30013+extern atomic_unchecked_t cm_connect_reqs;
30014+extern atomic_unchecked_t cm_rejects;
30015+extern atomic_unchecked_t mod_qp_timouts;
30016+extern atomic_unchecked_t qps_created;
30017+extern atomic_unchecked_t qps_destroyed;
30018+extern atomic_unchecked_t sw_qps_destroyed;
30019 extern u32 mh_detected;
30020 extern u32 mh_pauses_sent;
30021 extern u32 cm_packets_sent;
30022@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30023 extern u32 cm_packets_received;
30024 extern u32 cm_packets_dropped;
30025 extern u32 cm_packets_retrans;
30026-extern atomic_t cm_listens_created;
30027-extern atomic_t cm_listens_destroyed;
30028+extern atomic_unchecked_t cm_listens_created;
30029+extern atomic_unchecked_t cm_listens_destroyed;
30030 extern u32 cm_backlog_drops;
30031-extern atomic_t cm_loopbacks;
30032-extern atomic_t cm_nodes_created;
30033-extern atomic_t cm_nodes_destroyed;
30034-extern atomic_t cm_accel_dropped_pkts;
30035-extern atomic_t cm_resets_recvd;
30036-extern atomic_t pau_qps_created;
30037-extern atomic_t pau_qps_destroyed;
30038+extern atomic_unchecked_t cm_loopbacks;
30039+extern atomic_unchecked_t cm_nodes_created;
30040+extern atomic_unchecked_t cm_nodes_destroyed;
30041+extern atomic_unchecked_t cm_accel_dropped_pkts;
30042+extern atomic_unchecked_t cm_resets_recvd;
30043+extern atomic_unchecked_t pau_qps_created;
30044+extern atomic_unchecked_t pau_qps_destroyed;
30045
30046 extern u32 int_mod_timer_init;
30047 extern u32 int_mod_cq_depth_256;
30048diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30049index 0a52d72..0642f36 100644
30050--- a/drivers/infiniband/hw/nes/nes_cm.c
30051+++ b/drivers/infiniband/hw/nes/nes_cm.c
30052@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30053 u32 cm_packets_retrans;
30054 u32 cm_packets_created;
30055 u32 cm_packets_received;
30056-atomic_t cm_listens_created;
30057-atomic_t cm_listens_destroyed;
30058+atomic_unchecked_t cm_listens_created;
30059+atomic_unchecked_t cm_listens_destroyed;
30060 u32 cm_backlog_drops;
30061-atomic_t cm_loopbacks;
30062-atomic_t cm_nodes_created;
30063-atomic_t cm_nodes_destroyed;
30064-atomic_t cm_accel_dropped_pkts;
30065-atomic_t cm_resets_recvd;
30066+atomic_unchecked_t cm_loopbacks;
30067+atomic_unchecked_t cm_nodes_created;
30068+atomic_unchecked_t cm_nodes_destroyed;
30069+atomic_unchecked_t cm_accel_dropped_pkts;
30070+atomic_unchecked_t cm_resets_recvd;
30071
30072 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30073 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30074@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30075
30076 static struct nes_cm_core *g_cm_core;
30077
30078-atomic_t cm_connects;
30079-atomic_t cm_accepts;
30080-atomic_t cm_disconnects;
30081-atomic_t cm_closes;
30082-atomic_t cm_connecteds;
30083-atomic_t cm_connect_reqs;
30084-atomic_t cm_rejects;
30085+atomic_unchecked_t cm_connects;
30086+atomic_unchecked_t cm_accepts;
30087+atomic_unchecked_t cm_disconnects;
30088+atomic_unchecked_t cm_closes;
30089+atomic_unchecked_t cm_connecteds;
30090+atomic_unchecked_t cm_connect_reqs;
30091+atomic_unchecked_t cm_rejects;
30092
30093 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30094 {
30095@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30096 kfree(listener);
30097 listener = NULL;
30098 ret = 0;
30099- atomic_inc(&cm_listens_destroyed);
30100+ atomic_inc_unchecked(&cm_listens_destroyed);
30101 } else {
30102 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30103 }
30104@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30105 cm_node->rem_mac);
30106
30107 add_hte_node(cm_core, cm_node);
30108- atomic_inc(&cm_nodes_created);
30109+ atomic_inc_unchecked(&cm_nodes_created);
30110
30111 return cm_node;
30112 }
30113@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30114 }
30115
30116 atomic_dec(&cm_core->node_cnt);
30117- atomic_inc(&cm_nodes_destroyed);
30118+ atomic_inc_unchecked(&cm_nodes_destroyed);
30119 nesqp = cm_node->nesqp;
30120 if (nesqp) {
30121 nesqp->cm_node = NULL;
30122@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30123
30124 static void drop_packet(struct sk_buff *skb)
30125 {
30126- atomic_inc(&cm_accel_dropped_pkts);
30127+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30128 dev_kfree_skb_any(skb);
30129 }
30130
30131@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30132 {
30133
30134 int reset = 0; /* whether to send reset in case of err.. */
30135- atomic_inc(&cm_resets_recvd);
30136+ atomic_inc_unchecked(&cm_resets_recvd);
30137 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30138 " refcnt=%d\n", cm_node, cm_node->state,
30139 atomic_read(&cm_node->ref_count));
30140@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30141 rem_ref_cm_node(cm_node->cm_core, cm_node);
30142 return NULL;
30143 }
30144- atomic_inc(&cm_loopbacks);
30145+ atomic_inc_unchecked(&cm_loopbacks);
30146 loopbackremotenode->loopbackpartner = cm_node;
30147 loopbackremotenode->tcp_cntxt.rcv_wscale =
30148 NES_CM_DEFAULT_RCV_WND_SCALE;
30149@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30150 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30151 else {
30152 rem_ref_cm_node(cm_core, cm_node);
30153- atomic_inc(&cm_accel_dropped_pkts);
30154+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30155 dev_kfree_skb_any(skb);
30156 }
30157 break;
30158@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30159
30160 if ((cm_id) && (cm_id->event_handler)) {
30161 if (issue_disconn) {
30162- atomic_inc(&cm_disconnects);
30163+ atomic_inc_unchecked(&cm_disconnects);
30164 cm_event.event = IW_CM_EVENT_DISCONNECT;
30165 cm_event.status = disconn_status;
30166 cm_event.local_addr = cm_id->local_addr;
30167@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30168 }
30169
30170 if (issue_close) {
30171- atomic_inc(&cm_closes);
30172+ atomic_inc_unchecked(&cm_closes);
30173 nes_disconnect(nesqp, 1);
30174
30175 cm_id->provider_data = nesqp;
30176@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30177
30178 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30179 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30180- atomic_inc(&cm_accepts);
30181+ atomic_inc_unchecked(&cm_accepts);
30182
30183 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30184 netdev_refcnt_read(nesvnic->netdev));
30185@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30186 struct nes_cm_core *cm_core;
30187 u8 *start_buff;
30188
30189- atomic_inc(&cm_rejects);
30190+ atomic_inc_unchecked(&cm_rejects);
30191 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30192 loopback = cm_node->loopbackpartner;
30193 cm_core = cm_node->cm_core;
30194@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30195 ntohl(cm_id->local_addr.sin_addr.s_addr),
30196 ntohs(cm_id->local_addr.sin_port));
30197
30198- atomic_inc(&cm_connects);
30199+ atomic_inc_unchecked(&cm_connects);
30200 nesqp->active_conn = 1;
30201
30202 /* cache the cm_id in the qp */
30203@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30204 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30205 return err;
30206 }
30207- atomic_inc(&cm_listens_created);
30208+ atomic_inc_unchecked(&cm_listens_created);
30209 }
30210
30211 cm_id->add_ref(cm_id);
30212@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30213
30214 if (nesqp->destroyed)
30215 return;
30216- atomic_inc(&cm_connecteds);
30217+ atomic_inc_unchecked(&cm_connecteds);
30218 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30219 " local port 0x%04X. jiffies = %lu.\n",
30220 nesqp->hwqp.qp_id,
30221@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30222
30223 cm_id->add_ref(cm_id);
30224 ret = cm_id->event_handler(cm_id, &cm_event);
30225- atomic_inc(&cm_closes);
30226+ atomic_inc_unchecked(&cm_closes);
30227 cm_event.event = IW_CM_EVENT_CLOSE;
30228 cm_event.status = 0;
30229 cm_event.provider_data = cm_id->provider_data;
30230@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30231 return;
30232 cm_id = cm_node->cm_id;
30233
30234- atomic_inc(&cm_connect_reqs);
30235+ atomic_inc_unchecked(&cm_connect_reqs);
30236 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30237 cm_node, cm_id, jiffies);
30238
30239@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30240 return;
30241 cm_id = cm_node->cm_id;
30242
30243- atomic_inc(&cm_connect_reqs);
30244+ atomic_inc_unchecked(&cm_connect_reqs);
30245 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30246 cm_node, cm_id, jiffies);
30247
30248diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30249index b3b2a24..7bfaf1e 100644
30250--- a/drivers/infiniband/hw/nes/nes_mgt.c
30251+++ b/drivers/infiniband/hw/nes/nes_mgt.c
30252@@ -40,8 +40,8 @@
30253 #include "nes.h"
30254 #include "nes_mgt.h"
30255
30256-atomic_t pau_qps_created;
30257-atomic_t pau_qps_destroyed;
30258+atomic_unchecked_t pau_qps_created;
30259+atomic_unchecked_t pau_qps_destroyed;
30260
30261 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30262 {
30263@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30264 {
30265 struct sk_buff *skb;
30266 unsigned long flags;
30267- atomic_inc(&pau_qps_destroyed);
30268+ atomic_inc_unchecked(&pau_qps_destroyed);
30269
30270 /* Free packets that have not yet been forwarded */
30271 /* Lock is acquired by skb_dequeue when removing the skb */
30272@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30273 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30274 skb_queue_head_init(&nesqp->pau_list);
30275 spin_lock_init(&nesqp->pau_lock);
30276- atomic_inc(&pau_qps_created);
30277+ atomic_inc_unchecked(&pau_qps_created);
30278 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30279 }
30280
30281diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30282index c00d2f3..8834298 100644
30283--- a/drivers/infiniband/hw/nes/nes_nic.c
30284+++ b/drivers/infiniband/hw/nes/nes_nic.c
30285@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30286 target_stat_values[++index] = mh_detected;
30287 target_stat_values[++index] = mh_pauses_sent;
30288 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30289- target_stat_values[++index] = atomic_read(&cm_connects);
30290- target_stat_values[++index] = atomic_read(&cm_accepts);
30291- target_stat_values[++index] = atomic_read(&cm_disconnects);
30292- target_stat_values[++index] = atomic_read(&cm_connecteds);
30293- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30294- target_stat_values[++index] = atomic_read(&cm_rejects);
30295- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30296- target_stat_values[++index] = atomic_read(&qps_created);
30297- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30298- target_stat_values[++index] = atomic_read(&qps_destroyed);
30299- target_stat_values[++index] = atomic_read(&cm_closes);
30300+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30301+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30302+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30303+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30304+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30305+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30306+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30307+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30308+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30309+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30310+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30311 target_stat_values[++index] = cm_packets_sent;
30312 target_stat_values[++index] = cm_packets_bounced;
30313 target_stat_values[++index] = cm_packets_created;
30314 target_stat_values[++index] = cm_packets_received;
30315 target_stat_values[++index] = cm_packets_dropped;
30316 target_stat_values[++index] = cm_packets_retrans;
30317- target_stat_values[++index] = atomic_read(&cm_listens_created);
30318- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30319+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30320+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30321 target_stat_values[++index] = cm_backlog_drops;
30322- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30323- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30324- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30325- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30326- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30327+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30328+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30329+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30330+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30331+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30332 target_stat_values[++index] = nesadapter->free_4kpbl;
30333 target_stat_values[++index] = nesadapter->free_256pbl;
30334 target_stat_values[++index] = int_mod_timer_init;
30335 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30336 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30337 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30338- target_stat_values[++index] = atomic_read(&pau_qps_created);
30339- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30340+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30341+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30342 }
30343
30344 /**
30345diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30346index 5095bc4..41e8fff 100644
30347--- a/drivers/infiniband/hw/nes/nes_verbs.c
30348+++ b/drivers/infiniband/hw/nes/nes_verbs.c
30349@@ -46,9 +46,9 @@
30350
30351 #include <rdma/ib_umem.h>
30352
30353-atomic_t mod_qp_timouts;
30354-atomic_t qps_created;
30355-atomic_t sw_qps_destroyed;
30356+atomic_unchecked_t mod_qp_timouts;
30357+atomic_unchecked_t qps_created;
30358+atomic_unchecked_t sw_qps_destroyed;
30359
30360 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30361
30362@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30363 if (init_attr->create_flags)
30364 return ERR_PTR(-EINVAL);
30365
30366- atomic_inc(&qps_created);
30367+ atomic_inc_unchecked(&qps_created);
30368 switch (init_attr->qp_type) {
30369 case IB_QPT_RC:
30370 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30371@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30372 struct iw_cm_event cm_event;
30373 int ret = 0;
30374
30375- atomic_inc(&sw_qps_destroyed);
30376+ atomic_inc_unchecked(&sw_qps_destroyed);
30377 nesqp->destroyed = 1;
30378
30379 /* Blow away the connection if it exists. */
30380diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30381index b881bdc..c2e360c 100644
30382--- a/drivers/infiniband/hw/qib/qib.h
30383+++ b/drivers/infiniband/hw/qib/qib.h
30384@@ -51,6 +51,7 @@
30385 #include <linux/completion.h>
30386 #include <linux/kref.h>
30387 #include <linux/sched.h>
30388+#include <linux/slab.h>
30389
30390 #include "qib_common.h"
30391 #include "qib_verbs.h"
30392diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30393index c351aa4..e6967c2 100644
30394--- a/drivers/input/gameport/gameport.c
30395+++ b/drivers/input/gameport/gameport.c
30396@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30397 */
30398 static void gameport_init_port(struct gameport *gameport)
30399 {
30400- static atomic_t gameport_no = ATOMIC_INIT(0);
30401+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30402
30403 __module_get(THIS_MODULE);
30404
30405 mutex_init(&gameport->drv_mutex);
30406 device_initialize(&gameport->dev);
30407 dev_set_name(&gameport->dev, "gameport%lu",
30408- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30409+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30410 gameport->dev.bus = &gameport_bus;
30411 gameport->dev.release = gameport_release_port;
30412 if (gameport->parent)
30413diff --git a/drivers/input/input.c b/drivers/input/input.c
30414index da38d97..2aa0b79 100644
30415--- a/drivers/input/input.c
30416+++ b/drivers/input/input.c
30417@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30418 */
30419 int input_register_device(struct input_dev *dev)
30420 {
30421- static atomic_t input_no = ATOMIC_INIT(0);
30422+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30423 struct input_handler *handler;
30424 const char *path;
30425 int error;
30426@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30427 dev->setkeycode = input_default_setkeycode;
30428
30429 dev_set_name(&dev->dev, "input%ld",
30430- (unsigned long) atomic_inc_return(&input_no) - 1);
30431+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30432
30433 error = device_add(&dev->dev);
30434 if (error)
30435diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30436index b8d8611..7a4a04b 100644
30437--- a/drivers/input/joystick/sidewinder.c
30438+++ b/drivers/input/joystick/sidewinder.c
30439@@ -30,6 +30,7 @@
30440 #include <linux/kernel.h>
30441 #include <linux/module.h>
30442 #include <linux/slab.h>
30443+#include <linux/sched.h>
30444 #include <linux/init.h>
30445 #include <linux/input.h>
30446 #include <linux/gameport.h>
30447diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30448index d728875..844c89b 100644
30449--- a/drivers/input/joystick/xpad.c
30450+++ b/drivers/input/joystick/xpad.c
30451@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30452
30453 static int xpad_led_probe(struct usb_xpad *xpad)
30454 {
30455- static atomic_t led_seq = ATOMIC_INIT(0);
30456+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30457 long led_no;
30458 struct xpad_led *led;
30459 struct led_classdev *led_cdev;
30460@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30461 if (!led)
30462 return -ENOMEM;
30463
30464- led_no = (long)atomic_inc_return(&led_seq) - 1;
30465+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30466
30467 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30468 led->xpad = xpad;
30469diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30470index 0110b5a..d3ad144 100644
30471--- a/drivers/input/mousedev.c
30472+++ b/drivers/input/mousedev.c
30473@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30474
30475 spin_unlock_irq(&client->packet_lock);
30476
30477- if (copy_to_user(buffer, data, count))
30478+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30479 return -EFAULT;
30480
30481 return count;
30482diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30483index ba70058..571d25d 100644
30484--- a/drivers/input/serio/serio.c
30485+++ b/drivers/input/serio/serio.c
30486@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30487 */
30488 static void serio_init_port(struct serio *serio)
30489 {
30490- static atomic_t serio_no = ATOMIC_INIT(0);
30491+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30492
30493 __module_get(THIS_MODULE);
30494
30495@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30496 mutex_init(&serio->drv_mutex);
30497 device_initialize(&serio->dev);
30498 dev_set_name(&serio->dev, "serio%ld",
30499- (long)atomic_inc_return(&serio_no) - 1);
30500+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30501 serio->dev.bus = &serio_bus;
30502 serio->dev.release = serio_release_port;
30503 serio->dev.groups = serio_device_attr_groups;
30504diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30505index e44933d..9ba484a 100644
30506--- a/drivers/isdn/capi/capi.c
30507+++ b/drivers/isdn/capi/capi.c
30508@@ -83,8 +83,8 @@ struct capiminor {
30509
30510 struct capi20_appl *ap;
30511 u32 ncci;
30512- atomic_t datahandle;
30513- atomic_t msgid;
30514+ atomic_unchecked_t datahandle;
30515+ atomic_unchecked_t msgid;
30516
30517 struct tty_port port;
30518 int ttyinstop;
30519@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30520 capimsg_setu16(s, 2, mp->ap->applid);
30521 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30522 capimsg_setu8 (s, 5, CAPI_RESP);
30523- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30524+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30525 capimsg_setu32(s, 8, mp->ncci);
30526 capimsg_setu16(s, 12, datahandle);
30527 }
30528@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30529 mp->outbytes -= len;
30530 spin_unlock_bh(&mp->outlock);
30531
30532- datahandle = atomic_inc_return(&mp->datahandle);
30533+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30534 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30535 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30536 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30537 capimsg_setu16(skb->data, 2, mp->ap->applid);
30538 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30539 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30540- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30541+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30542 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30543 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30544 capimsg_setu16(skb->data, 16, len); /* Data length */
30545diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30546index db621db..825ea1a 100644
30547--- a/drivers/isdn/gigaset/common.c
30548+++ b/drivers/isdn/gigaset/common.c
30549@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30550 cs->commands_pending = 0;
30551 cs->cur_at_seq = 0;
30552 cs->gotfwver = -1;
30553- cs->open_count = 0;
30554+ local_set(&cs->open_count, 0);
30555 cs->dev = NULL;
30556 cs->tty = NULL;
30557 cs->tty_dev = NULL;
30558diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30559index 212efaf..f187c6b 100644
30560--- a/drivers/isdn/gigaset/gigaset.h
30561+++ b/drivers/isdn/gigaset/gigaset.h
30562@@ -35,6 +35,7 @@
30563 #include <linux/tty_driver.h>
30564 #include <linux/list.h>
30565 #include <linux/atomic.h>
30566+#include <asm/local.h>
30567
30568 #define GIG_VERSION {0, 5, 0, 0}
30569 #define GIG_COMPAT {0, 4, 0, 0}
30570@@ -433,7 +434,7 @@ struct cardstate {
30571 spinlock_t cmdlock;
30572 unsigned curlen, cmdbytes;
30573
30574- unsigned open_count;
30575+ local_t open_count;
30576 struct tty_struct *tty;
30577 struct tasklet_struct if_wake_tasklet;
30578 unsigned control_state;
30579diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30580index ee0a549..a7c9798 100644
30581--- a/drivers/isdn/gigaset/interface.c
30582+++ b/drivers/isdn/gigaset/interface.c
30583@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30584 }
30585 tty->driver_data = cs;
30586
30587- ++cs->open_count;
30588-
30589- if (cs->open_count == 1) {
30590+ if (local_inc_return(&cs->open_count) == 1) {
30591 spin_lock_irqsave(&cs->lock, flags);
30592 cs->tty = tty;
30593 spin_unlock_irqrestore(&cs->lock, flags);
30594@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30595
30596 if (!cs->connected)
30597 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30598- else if (!cs->open_count)
30599+ else if (!local_read(&cs->open_count))
30600 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30601 else {
30602- if (!--cs->open_count) {
30603+ if (!local_dec_return(&cs->open_count)) {
30604 spin_lock_irqsave(&cs->lock, flags);
30605 cs->tty = NULL;
30606 spin_unlock_irqrestore(&cs->lock, flags);
30607@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30608 if (!cs->connected) {
30609 gig_dbg(DEBUG_IF, "not connected");
30610 retval = -ENODEV;
30611- } else if (!cs->open_count)
30612+ } else if (!local_read(&cs->open_count))
30613 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30614 else {
30615 retval = 0;
30616@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30617 retval = -ENODEV;
30618 goto done;
30619 }
30620- if (!cs->open_count) {
30621+ if (!local_read(&cs->open_count)) {
30622 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30623 retval = -ENODEV;
30624 goto done;
30625@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30626 if (!cs->connected) {
30627 gig_dbg(DEBUG_IF, "not connected");
30628 retval = -ENODEV;
30629- } else if (!cs->open_count)
30630+ } else if (!local_read(&cs->open_count))
30631 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30632 else if (cs->mstate != MS_LOCKED) {
30633 dev_warn(cs->dev, "can't write to unlocked device\n");
30634@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30635
30636 if (!cs->connected)
30637 gig_dbg(DEBUG_IF, "not connected");
30638- else if (!cs->open_count)
30639+ else if (!local_read(&cs->open_count))
30640 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30641 else if (cs->mstate != MS_LOCKED)
30642 dev_warn(cs->dev, "can't write to unlocked device\n");
30643@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30644
30645 if (!cs->connected)
30646 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30647- else if (!cs->open_count)
30648+ else if (!local_read(&cs->open_count))
30649 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30650 else
30651 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30652@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30653
30654 if (!cs->connected)
30655 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30656- else if (!cs->open_count)
30657+ else if (!local_read(&cs->open_count))
30658 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30659 else
30660 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30661@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30662 goto out;
30663 }
30664
30665- if (!cs->open_count) {
30666+ if (!local_read(&cs->open_count)) {
30667 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30668 goto out;
30669 }
30670diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30671index 2a57da59..e7a12ed 100644
30672--- a/drivers/isdn/hardware/avm/b1.c
30673+++ b/drivers/isdn/hardware/avm/b1.c
30674@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30675 }
30676 if (left) {
30677 if (t4file->user) {
30678- if (copy_from_user(buf, dp, left))
30679+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30680 return -EFAULT;
30681 } else {
30682 memcpy(buf, dp, left);
30683@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30684 }
30685 if (left) {
30686 if (config->user) {
30687- if (copy_from_user(buf, dp, left))
30688+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30689 return -EFAULT;
30690 } else {
30691 memcpy(buf, dp, left);
30692diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30693index 85784a7..a19ca98 100644
30694--- a/drivers/isdn/hardware/eicon/divasync.h
30695+++ b/drivers/isdn/hardware/eicon/divasync.h
30696@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30697 } diva_didd_add_adapter_t;
30698 typedef struct _diva_didd_remove_adapter {
30699 IDI_CALL p_request;
30700-} diva_didd_remove_adapter_t;
30701+} __no_const diva_didd_remove_adapter_t;
30702 typedef struct _diva_didd_read_adapter_array {
30703 void * buffer;
30704 dword length;
30705diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30706index a3bd163..8956575 100644
30707--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30708+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30709@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30710 typedef struct _diva_os_idi_adapter_interface {
30711 diva_init_card_proc_t cleanup_adapter_proc;
30712 diva_cmd_card_proc_t cmd_proc;
30713-} diva_os_idi_adapter_interface_t;
30714+} __no_const diva_os_idi_adapter_interface_t;
30715
30716 typedef struct _diva_os_xdi_adapter {
30717 struct list_head link;
30718diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30719index 1f355bb..43f1fea 100644
30720--- a/drivers/isdn/icn/icn.c
30721+++ b/drivers/isdn/icn/icn.c
30722@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30723 if (count > len)
30724 count = len;
30725 if (user) {
30726- if (copy_from_user(msg, buf, count))
30727+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30728 return -EFAULT;
30729 } else
30730 memcpy(msg, buf, count);
30731diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30732index b5fdcb7..5b6c59f 100644
30733--- a/drivers/lguest/core.c
30734+++ b/drivers/lguest/core.c
30735@@ -92,9 +92,17 @@ static __init int map_switcher(void)
30736 * it's worked so far. The end address needs +1 because __get_vm_area
30737 * allocates an extra guard page, so we need space for that.
30738 */
30739+
30740+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30741+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30742+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30743+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30744+#else
30745 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30746 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30747 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30748+#endif
30749+
30750 if (!switcher_vma) {
30751 err = -ENOMEM;
30752 printk("lguest: could not map switcher pages high\n");
30753@@ -119,7 +127,7 @@ static __init int map_switcher(void)
30754 * Now the Switcher is mapped at the right address, we can't fail!
30755 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30756 */
30757- memcpy(switcher_vma->addr, start_switcher_text,
30758+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30759 end_switcher_text - start_switcher_text);
30760
30761 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30762diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30763index 65af42f..530c87a 100644
30764--- a/drivers/lguest/x86/core.c
30765+++ b/drivers/lguest/x86/core.c
30766@@ -59,7 +59,7 @@ static struct {
30767 /* Offset from where switcher.S was compiled to where we've copied it */
30768 static unsigned long switcher_offset(void)
30769 {
30770- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30771+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30772 }
30773
30774 /* This cpu's struct lguest_pages. */
30775@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30776 * These copies are pretty cheap, so we do them unconditionally: */
30777 /* Save the current Host top-level page directory.
30778 */
30779+
30780+#ifdef CONFIG_PAX_PER_CPU_PGD
30781+ pages->state.host_cr3 = read_cr3();
30782+#else
30783 pages->state.host_cr3 = __pa(current->mm->pgd);
30784+#endif
30785+
30786 /*
30787 * Set up the Guest's page tables to see this CPU's pages (and no
30788 * other CPU's pages).
30789@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30790 * compiled-in switcher code and the high-mapped copy we just made.
30791 */
30792 for (i = 0; i < IDT_ENTRIES; i++)
30793- default_idt_entries[i] += switcher_offset();
30794+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30795
30796 /*
30797 * Set up the Switcher's per-cpu areas.
30798@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30799 * it will be undisturbed when we switch. To change %cs and jump we
30800 * need this structure to feed to Intel's "lcall" instruction.
30801 */
30802- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30803+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30804 lguest_entry.segment = LGUEST_CS;
30805
30806 /*
30807diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30808index 40634b0..4f5855e 100644
30809--- a/drivers/lguest/x86/switcher_32.S
30810+++ b/drivers/lguest/x86/switcher_32.S
30811@@ -87,6 +87,7 @@
30812 #include <asm/page.h>
30813 #include <asm/segment.h>
30814 #include <asm/lguest.h>
30815+#include <asm/processor-flags.h>
30816
30817 // We mark the start of the code to copy
30818 // It's placed in .text tho it's never run here
30819@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30820 // Changes type when we load it: damn Intel!
30821 // For after we switch over our page tables
30822 // That entry will be read-only: we'd crash.
30823+
30824+#ifdef CONFIG_PAX_KERNEXEC
30825+ mov %cr0, %edx
30826+ xor $X86_CR0_WP, %edx
30827+ mov %edx, %cr0
30828+#endif
30829+
30830 movl $(GDT_ENTRY_TSS*8), %edx
30831 ltr %dx
30832
30833@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30834 // Let's clear it again for our return.
30835 // The GDT descriptor of the Host
30836 // Points to the table after two "size" bytes
30837- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30838+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30839 // Clear "used" from type field (byte 5, bit 2)
30840- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30841+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30842+
30843+#ifdef CONFIG_PAX_KERNEXEC
30844+ mov %cr0, %eax
30845+ xor $X86_CR0_WP, %eax
30846+ mov %eax, %cr0
30847+#endif
30848
30849 // Once our page table's switched, the Guest is live!
30850 // The Host fades as we run this final step.
30851@@ -295,13 +309,12 @@ deliver_to_host:
30852 // I consulted gcc, and it gave
30853 // These instructions, which I gladly credit:
30854 leal (%edx,%ebx,8), %eax
30855- movzwl (%eax),%edx
30856- movl 4(%eax), %eax
30857- xorw %ax, %ax
30858- orl %eax, %edx
30859+ movl 4(%eax), %edx
30860+ movw (%eax), %dx
30861 // Now the address of the handler's in %edx
30862 // We call it now: its "iret" drops us home.
30863- jmp *%edx
30864+ ljmp $__KERNEL_CS, $1f
30865+1: jmp *%edx
30866
30867 // Every interrupt can come to us here
30868 // But we must truly tell each apart.
30869diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30870index 4daf9e5..b8d1d0f 100644
30871--- a/drivers/macintosh/macio_asic.c
30872+++ b/drivers/macintosh/macio_asic.c
30873@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30874 * MacIO is matched against any Apple ID, it's probe() function
30875 * will then decide wether it applies or not
30876 */
30877-static const struct pci_device_id __devinitdata pci_ids [] = { {
30878+static const struct pci_device_id __devinitconst pci_ids [] = { {
30879 .vendor = PCI_VENDOR_ID_APPLE,
30880 .device = PCI_ANY_ID,
30881 .subvendor = PCI_ANY_ID,
30882diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
30883index 31c2dc2..a2de7a6 100644
30884--- a/drivers/md/dm-ioctl.c
30885+++ b/drivers/md/dm-ioctl.c
30886@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
30887 cmd == DM_LIST_VERSIONS_CMD)
30888 return 0;
30889
30890- if ((cmd == DM_DEV_CREATE_CMD)) {
30891+ if (cmd == DM_DEV_CREATE_CMD) {
30892 if (!*param->name) {
30893 DMWARN("name not supplied when creating device");
30894 return -EINVAL;
30895diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
30896index 9bfd057..01180bc 100644
30897--- a/drivers/md/dm-raid1.c
30898+++ b/drivers/md/dm-raid1.c
30899@@ -40,7 +40,7 @@ enum dm_raid1_error {
30900
30901 struct mirror {
30902 struct mirror_set *ms;
30903- atomic_t error_count;
30904+ atomic_unchecked_t error_count;
30905 unsigned long error_type;
30906 struct dm_dev *dev;
30907 sector_t offset;
30908@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
30909 struct mirror *m;
30910
30911 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
30912- if (!atomic_read(&m->error_count))
30913+ if (!atomic_read_unchecked(&m->error_count))
30914 return m;
30915
30916 return NULL;
30917@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
30918 * simple way to tell if a device has encountered
30919 * errors.
30920 */
30921- atomic_inc(&m->error_count);
30922+ atomic_inc_unchecked(&m->error_count);
30923
30924 if (test_and_set_bit(error_type, &m->error_type))
30925 return;
30926@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
30927 struct mirror *m = get_default_mirror(ms);
30928
30929 do {
30930- if (likely(!atomic_read(&m->error_count)))
30931+ if (likely(!atomic_read_unchecked(&m->error_count)))
30932 return m;
30933
30934 if (m-- == ms->mirror)
30935@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
30936 {
30937 struct mirror *default_mirror = get_default_mirror(m->ms);
30938
30939- return !atomic_read(&default_mirror->error_count);
30940+ return !atomic_read_unchecked(&default_mirror->error_count);
30941 }
30942
30943 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30944@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
30945 */
30946 if (likely(region_in_sync(ms, region, 1)))
30947 m = choose_mirror(ms, bio->bi_sector);
30948- else if (m && atomic_read(&m->error_count))
30949+ else if (m && atomic_read_unchecked(&m->error_count))
30950 m = NULL;
30951
30952 if (likely(m))
30953@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
30954 }
30955
30956 ms->mirror[mirror].ms = ms;
30957- atomic_set(&(ms->mirror[mirror].error_count), 0);
30958+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30959 ms->mirror[mirror].error_type = 0;
30960 ms->mirror[mirror].offset = offset;
30961
30962@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
30963 */
30964 static char device_status_char(struct mirror *m)
30965 {
30966- if (!atomic_read(&(m->error_count)))
30967+ if (!atomic_read_unchecked(&(m->error_count)))
30968 return 'A';
30969
30970 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
30971diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
30972index 3d80cf0..b77cc47 100644
30973--- a/drivers/md/dm-stripe.c
30974+++ b/drivers/md/dm-stripe.c
30975@@ -20,7 +20,7 @@ struct stripe {
30976 struct dm_dev *dev;
30977 sector_t physical_start;
30978
30979- atomic_t error_count;
30980+ atomic_unchecked_t error_count;
30981 };
30982
30983 struct stripe_c {
30984@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
30985 kfree(sc);
30986 return r;
30987 }
30988- atomic_set(&(sc->stripe[i].error_count), 0);
30989+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30990 }
30991
30992 ti->private = sc;
30993@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
30994 DMEMIT("%d ", sc->stripes);
30995 for (i = 0; i < sc->stripes; i++) {
30996 DMEMIT("%s ", sc->stripe[i].dev->name);
30997- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30998+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30999 'D' : 'A';
31000 }
31001 buffer[i] = '\0';
31002@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31003 */
31004 for (i = 0; i < sc->stripes; i++)
31005 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31006- atomic_inc(&(sc->stripe[i].error_count));
31007- if (atomic_read(&(sc->stripe[i].error_count)) <
31008+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31009+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31010 DM_IO_ERROR_THRESHOLD)
31011 schedule_work(&sc->trigger_event);
31012 }
31013diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31014index 8e91321..fd17aef 100644
31015--- a/drivers/md/dm-table.c
31016+++ b/drivers/md/dm-table.c
31017@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31018 if (!dev_size)
31019 return 0;
31020
31021- if ((start >= dev_size) || (start + len > dev_size)) {
31022+ if ((start >= dev_size) || (len > dev_size - start)) {
31023 DMWARN("%s: %s too small for target: "
31024 "start=%llu, len=%llu, dev_size=%llu",
31025 dm_device_name(ti->table->md), bdevname(bdev, b),
31026diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31027index 59c4f04..4c7b661 100644
31028--- a/drivers/md/dm-thin-metadata.c
31029+++ b/drivers/md/dm-thin-metadata.c
31030@@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31031
31032 pmd->info.tm = tm;
31033 pmd->info.levels = 2;
31034- pmd->info.value_type.context = pmd->data_sm;
31035+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31036 pmd->info.value_type.size = sizeof(__le64);
31037 pmd->info.value_type.inc = data_block_inc;
31038 pmd->info.value_type.dec = data_block_dec;
31039@@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31040
31041 pmd->bl_info.tm = tm;
31042 pmd->bl_info.levels = 1;
31043- pmd->bl_info.value_type.context = pmd->data_sm;
31044+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31045 pmd->bl_info.value_type.size = sizeof(__le64);
31046 pmd->bl_info.value_type.inc = data_block_inc;
31047 pmd->bl_info.value_type.dec = data_block_dec;
31048diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31049index 4720f68..78d1df7 100644
31050--- a/drivers/md/dm.c
31051+++ b/drivers/md/dm.c
31052@@ -177,9 +177,9 @@ struct mapped_device {
31053 /*
31054 * Event handling.
31055 */
31056- atomic_t event_nr;
31057+ atomic_unchecked_t event_nr;
31058 wait_queue_head_t eventq;
31059- atomic_t uevent_seq;
31060+ atomic_unchecked_t uevent_seq;
31061 struct list_head uevent_list;
31062 spinlock_t uevent_lock; /* Protect access to uevent_list */
31063
31064@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31065 rwlock_init(&md->map_lock);
31066 atomic_set(&md->holders, 1);
31067 atomic_set(&md->open_count, 0);
31068- atomic_set(&md->event_nr, 0);
31069- atomic_set(&md->uevent_seq, 0);
31070+ atomic_set_unchecked(&md->event_nr, 0);
31071+ atomic_set_unchecked(&md->uevent_seq, 0);
31072 INIT_LIST_HEAD(&md->uevent_list);
31073 spin_lock_init(&md->uevent_lock);
31074
31075@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31076
31077 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31078
31079- atomic_inc(&md->event_nr);
31080+ atomic_inc_unchecked(&md->event_nr);
31081 wake_up(&md->eventq);
31082 }
31083
31084@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31085
31086 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31087 {
31088- return atomic_add_return(1, &md->uevent_seq);
31089+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31090 }
31091
31092 uint32_t dm_get_event_nr(struct mapped_device *md)
31093 {
31094- return atomic_read(&md->event_nr);
31095+ return atomic_read_unchecked(&md->event_nr);
31096 }
31097
31098 int dm_wait_event(struct mapped_device *md, int event_nr)
31099 {
31100 return wait_event_interruptible(md->eventq,
31101- (event_nr != atomic_read(&md->event_nr)));
31102+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31103 }
31104
31105 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31106diff --git a/drivers/md/md.c b/drivers/md/md.c
31107index f47f1f8..b7f559e 100644
31108--- a/drivers/md/md.c
31109+++ b/drivers/md/md.c
31110@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31111 * start build, activate spare
31112 */
31113 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31114-static atomic_t md_event_count;
31115+static atomic_unchecked_t md_event_count;
31116 void md_new_event(struct mddev *mddev)
31117 {
31118- atomic_inc(&md_event_count);
31119+ atomic_inc_unchecked(&md_event_count);
31120 wake_up(&md_event_waiters);
31121 }
31122 EXPORT_SYMBOL_GPL(md_new_event);
31123@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31124 */
31125 static void md_new_event_inintr(struct mddev *mddev)
31126 {
31127- atomic_inc(&md_event_count);
31128+ atomic_inc_unchecked(&md_event_count);
31129 wake_up(&md_event_waiters);
31130 }
31131
31132@@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31133
31134 rdev->preferred_minor = 0xffff;
31135 rdev->data_offset = le64_to_cpu(sb->data_offset);
31136- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31137+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31138
31139 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31140 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31141@@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31142 else
31143 sb->resync_offset = cpu_to_le64(0);
31144
31145- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31146+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31147
31148 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31149 sb->size = cpu_to_le64(mddev->dev_sectors);
31150@@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31151 static ssize_t
31152 errors_show(struct md_rdev *rdev, char *page)
31153 {
31154- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31155+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31156 }
31157
31158 static ssize_t
31159@@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31160 char *e;
31161 unsigned long n = simple_strtoul(buf, &e, 10);
31162 if (*buf && (*e == 0 || *e == '\n')) {
31163- atomic_set(&rdev->corrected_errors, n);
31164+ atomic_set_unchecked(&rdev->corrected_errors, n);
31165 return len;
31166 }
31167 return -EINVAL;
31168@@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31169 rdev->sb_loaded = 0;
31170 rdev->bb_page = NULL;
31171 atomic_set(&rdev->nr_pending, 0);
31172- atomic_set(&rdev->read_errors, 0);
31173- atomic_set(&rdev->corrected_errors, 0);
31174+ atomic_set_unchecked(&rdev->read_errors, 0);
31175+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31176
31177 INIT_LIST_HEAD(&rdev->same_set);
31178 init_waitqueue_head(&rdev->blocked_wait);
31179@@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31180
31181 spin_unlock(&pers_lock);
31182 seq_printf(seq, "\n");
31183- seq->poll_event = atomic_read(&md_event_count);
31184+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31185 return 0;
31186 }
31187 if (v == (void*)2) {
31188@@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31189 chunk_kb ? "KB" : "B");
31190 if (bitmap->file) {
31191 seq_printf(seq, ", file: ");
31192- seq_path(seq, &bitmap->file->f_path, " \t\n");
31193+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31194 }
31195
31196 seq_printf(seq, "\n");
31197@@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31198 return error;
31199
31200 seq = file->private_data;
31201- seq->poll_event = atomic_read(&md_event_count);
31202+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31203 return error;
31204 }
31205
31206@@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31207 /* always allow read */
31208 mask = POLLIN | POLLRDNORM;
31209
31210- if (seq->poll_event != atomic_read(&md_event_count))
31211+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31212 mask |= POLLERR | POLLPRI;
31213 return mask;
31214 }
31215@@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31216 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31217 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31218 (int)part_stat_read(&disk->part0, sectors[1]) -
31219- atomic_read(&disk->sync_io);
31220+ atomic_read_unchecked(&disk->sync_io);
31221 /* sync IO will cause sync_io to increase before the disk_stats
31222 * as sync_io is counted when a request starts, and
31223 * disk_stats is counted when it completes.
31224diff --git a/drivers/md/md.h b/drivers/md/md.h
31225index cf742d9..7c7c745 100644
31226--- a/drivers/md/md.h
31227+++ b/drivers/md/md.h
31228@@ -120,13 +120,13 @@ struct md_rdev {
31229 * only maintained for arrays that
31230 * support hot removal
31231 */
31232- atomic_t read_errors; /* number of consecutive read errors that
31233+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31234 * we have tried to ignore.
31235 */
31236 struct timespec last_read_error; /* monotonic time since our
31237 * last read error
31238 */
31239- atomic_t corrected_errors; /* number of corrected read errors,
31240+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31241 * for reporting to userspace and storing
31242 * in superblock.
31243 */
31244@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31245
31246 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31247 {
31248- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31249+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31250 }
31251
31252 struct md_personality
31253diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31254index 50ed53b..4f29d7d 100644
31255--- a/drivers/md/persistent-data/dm-space-map-checker.c
31256+++ b/drivers/md/persistent-data/dm-space-map-checker.c
31257@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31258 /*----------------------------------------------------------------*/
31259
31260 struct sm_checker {
31261- struct dm_space_map sm;
31262+ dm_space_map_no_const sm;
31263
31264 struct count_array old_counts;
31265 struct count_array counts;
31266diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31267index fc469ba..2d91555 100644
31268--- a/drivers/md/persistent-data/dm-space-map-disk.c
31269+++ b/drivers/md/persistent-data/dm-space-map-disk.c
31270@@ -23,7 +23,7 @@
31271 * Space map interface.
31272 */
31273 struct sm_disk {
31274- struct dm_space_map sm;
31275+ dm_space_map_no_const sm;
31276
31277 struct ll_disk ll;
31278 struct ll_disk old_ll;
31279diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31280index e89ae5e..062e4c2 100644
31281--- a/drivers/md/persistent-data/dm-space-map-metadata.c
31282+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31283@@ -43,7 +43,7 @@ struct block_op {
31284 };
31285
31286 struct sm_metadata {
31287- struct dm_space_map sm;
31288+ dm_space_map_no_const sm;
31289
31290 struct ll_disk ll;
31291 struct ll_disk old_ll;
31292diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31293index 1cbfc6b..56e1dbb 100644
31294--- a/drivers/md/persistent-data/dm-space-map.h
31295+++ b/drivers/md/persistent-data/dm-space-map.h
31296@@ -60,6 +60,7 @@ struct dm_space_map {
31297 int (*root_size)(struct dm_space_map *sm, size_t *result);
31298 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31299 };
31300+typedef struct dm_space_map __no_const dm_space_map_no_const;
31301
31302 /*----------------------------------------------------------------*/
31303
31304diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31305index 7d9e071..015b1d5 100644
31306--- a/drivers/md/raid1.c
31307+++ b/drivers/md/raid1.c
31308@@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31309 if (r1_sync_page_io(rdev, sect, s,
31310 bio->bi_io_vec[idx].bv_page,
31311 READ) != 0)
31312- atomic_add(s, &rdev->corrected_errors);
31313+ atomic_add_unchecked(s, &rdev->corrected_errors);
31314 }
31315 sectors -= s;
31316 sect += s;
31317@@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31318 test_bit(In_sync, &rdev->flags)) {
31319 if (r1_sync_page_io(rdev, sect, s,
31320 conf->tmppage, READ)) {
31321- atomic_add(s, &rdev->corrected_errors);
31322+ atomic_add_unchecked(s, &rdev->corrected_errors);
31323 printk(KERN_INFO
31324 "md/raid1:%s: read error corrected "
31325 "(%d sectors at %llu on %s)\n",
31326diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31327index 685ddf3..955b087 100644
31328--- a/drivers/md/raid10.c
31329+++ b/drivers/md/raid10.c
31330@@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31331 /* The write handler will notice the lack of
31332 * R10BIO_Uptodate and record any errors etc
31333 */
31334- atomic_add(r10_bio->sectors,
31335+ atomic_add_unchecked(r10_bio->sectors,
31336 &conf->mirrors[d].rdev->corrected_errors);
31337
31338 /* for reconstruct, we always reschedule after a read.
31339@@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31340 {
31341 struct timespec cur_time_mon;
31342 unsigned long hours_since_last;
31343- unsigned int read_errors = atomic_read(&rdev->read_errors);
31344+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31345
31346 ktime_get_ts(&cur_time_mon);
31347
31348@@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31349 * overflowing the shift of read_errors by hours_since_last.
31350 */
31351 if (hours_since_last >= 8 * sizeof(read_errors))
31352- atomic_set(&rdev->read_errors, 0);
31353+ atomic_set_unchecked(&rdev->read_errors, 0);
31354 else
31355- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31356+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31357 }
31358
31359 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31360@@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31361 return;
31362
31363 check_decay_read_errors(mddev, rdev);
31364- atomic_inc(&rdev->read_errors);
31365- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31366+ atomic_inc_unchecked(&rdev->read_errors);
31367+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31368 char b[BDEVNAME_SIZE];
31369 bdevname(rdev->bdev, b);
31370
31371@@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31372 "md/raid10:%s: %s: Raid device exceeded "
31373 "read_error threshold [cur %d:max %d]\n",
31374 mdname(mddev), b,
31375- atomic_read(&rdev->read_errors), max_read_errors);
31376+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31377 printk(KERN_NOTICE
31378 "md/raid10:%s: %s: Failing raid device\n",
31379 mdname(mddev), b);
31380@@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31381 (unsigned long long)(
31382 sect + rdev->data_offset),
31383 bdevname(rdev->bdev, b));
31384- atomic_add(s, &rdev->corrected_errors);
31385+ atomic_add_unchecked(s, &rdev->corrected_errors);
31386 }
31387
31388 rdev_dec_pending(rdev, mddev);
31389diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31390index 858fdbb..b2dac95 100644
31391--- a/drivers/md/raid5.c
31392+++ b/drivers/md/raid5.c
31393@@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31394 (unsigned long long)(sh->sector
31395 + rdev->data_offset),
31396 bdevname(rdev->bdev, b));
31397- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31398+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31399 clear_bit(R5_ReadError, &sh->dev[i].flags);
31400 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31401 }
31402- if (atomic_read(&conf->disks[i].rdev->read_errors))
31403- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31404+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31405+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31406 } else {
31407 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31408 int retry = 0;
31409 rdev = conf->disks[i].rdev;
31410
31411 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31412- atomic_inc(&rdev->read_errors);
31413+ atomic_inc_unchecked(&rdev->read_errors);
31414 if (conf->mddev->degraded >= conf->max_degraded)
31415 printk_ratelimited(
31416 KERN_WARNING
31417@@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31418 (unsigned long long)(sh->sector
31419 + rdev->data_offset),
31420 bdn);
31421- else if (atomic_read(&rdev->read_errors)
31422+ else if (atomic_read_unchecked(&rdev->read_errors)
31423 > conf->max_nr_stripes)
31424 printk(KERN_WARNING
31425 "md/raid:%s: Too many read errors, failing device %s.\n",
31426diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31427index ba9a643..e474ab5 100644
31428--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31429+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31430@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31431 .subvendor = _subvend, .subdevice = _subdev, \
31432 .driver_data = (unsigned long)&_driverdata }
31433
31434-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31435+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31436 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31437 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31438 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31439diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31440index a7d876f..8c21b61 100644
31441--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31442+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31443@@ -73,7 +73,7 @@ struct dvb_demux_feed {
31444 union {
31445 dmx_ts_cb ts;
31446 dmx_section_cb sec;
31447- } cb;
31448+ } __no_const cb;
31449
31450 struct dvb_demux *demux;
31451 void *priv;
31452diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31453index f732877..d38c35a 100644
31454--- a/drivers/media/dvb/dvb-core/dvbdev.c
31455+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31456@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31457 const struct dvb_device *template, void *priv, int type)
31458 {
31459 struct dvb_device *dvbdev;
31460- struct file_operations *dvbdevfops;
31461+ file_operations_no_const *dvbdevfops;
31462 struct device *clsdev;
31463 int minor;
31464 int id;
31465diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31466index 9f2a02c..5920f88 100644
31467--- a/drivers/media/dvb/dvb-usb/cxusb.c
31468+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31469@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31470 struct dib0700_adapter_state {
31471 int (*set_param_save) (struct dvb_frontend *,
31472 struct dvb_frontend_parameters *);
31473-};
31474+} __no_const;
31475
31476 static int dib7070_set_param_override(struct dvb_frontend *fe,
31477 struct dvb_frontend_parameters *fep)
31478diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31479index f103ec1..5e8968b 100644
31480--- a/drivers/media/dvb/dvb-usb/dw2102.c
31481+++ b/drivers/media/dvb/dvb-usb/dw2102.c
31482@@ -95,7 +95,7 @@ struct su3000_state {
31483
31484 struct s6x0_state {
31485 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31486-};
31487+} __no_const;
31488
31489 /* debug */
31490 static int dvb_usb_dw2102_debug;
31491diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31492index 404f63a..4796533 100644
31493--- a/drivers/media/dvb/frontends/dib3000.h
31494+++ b/drivers/media/dvb/frontends/dib3000.h
31495@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31496 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31497 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31498 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31499-};
31500+} __no_const;
31501
31502 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31503 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31504diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31505index 90bf573..e8463da 100644
31506--- a/drivers/media/dvb/frontends/ds3000.c
31507+++ b/drivers/media/dvb/frontends/ds3000.c
31508@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31509
31510 for (i = 0; i < 30 ; i++) {
31511 ds3000_read_status(fe, &status);
31512- if (status && FE_HAS_LOCK)
31513+ if (status & FE_HAS_LOCK)
31514 break;
31515
31516 msleep(10);
31517diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31518index 0564192..75b16f5 100644
31519--- a/drivers/media/dvb/ngene/ngene-cards.c
31520+++ b/drivers/media/dvb/ngene/ngene-cards.c
31521@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31522
31523 /****************************************************************************/
31524
31525-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31526+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31527 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31528 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31529 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31530diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31531index 16a089f..ab1667d 100644
31532--- a/drivers/media/radio/radio-cadet.c
31533+++ b/drivers/media/radio/radio-cadet.c
31534@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31535 unsigned char readbuf[RDS_BUFFER];
31536 int i = 0;
31537
31538+ if (count > RDS_BUFFER)
31539+ return -EFAULT;
31540 mutex_lock(&dev->lock);
31541 if (dev->rdsstat == 0) {
31542 dev->rdsstat = 1;
31543diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31544index 61287fc..8b08712 100644
31545--- a/drivers/media/rc/redrat3.c
31546+++ b/drivers/media/rc/redrat3.c
31547@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31548 return carrier;
31549 }
31550
31551-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31552+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31553 {
31554 struct redrat3_dev *rr3 = rcdev->priv;
31555 struct device *dev = rr3->dev;
31556diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31557index 9cde353..8c6a1c3 100644
31558--- a/drivers/media/video/au0828/au0828.h
31559+++ b/drivers/media/video/au0828/au0828.h
31560@@ -191,7 +191,7 @@ struct au0828_dev {
31561
31562 /* I2C */
31563 struct i2c_adapter i2c_adap;
31564- struct i2c_algorithm i2c_algo;
31565+ i2c_algorithm_no_const i2c_algo;
31566 struct i2c_client i2c_client;
31567 u32 i2c_rc;
31568
31569diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31570index 68d1240..46b32eb 100644
31571--- a/drivers/media/video/cx88/cx88-alsa.c
31572+++ b/drivers/media/video/cx88/cx88-alsa.c
31573@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31574 * Only boards with eeprom and byte 1 at eeprom=1 have it
31575 */
31576
31577-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31578+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31579 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31580 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31581 {0, }
31582diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31583index 305e6aa..0143317 100644
31584--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31585+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31586@@ -196,7 +196,7 @@ struct pvr2_hdw {
31587
31588 /* I2C stuff */
31589 struct i2c_adapter i2c_adap;
31590- struct i2c_algorithm i2c_algo;
31591+ i2c_algorithm_no_const i2c_algo;
31592 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31593 int i2c_cx25840_hack_state;
31594 int i2c_linked;
31595diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31596index a0895bf..b7ebb1b 100644
31597--- a/drivers/media/video/timblogiw.c
31598+++ b/drivers/media/video/timblogiw.c
31599@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31600
31601 /* Platform device functions */
31602
31603-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31604+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31605 .vidioc_querycap = timblogiw_querycap,
31606 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31607 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31608@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31609 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31610 };
31611
31612-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31613+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31614 .owner = THIS_MODULE,
31615 .open = timblogiw_open,
31616 .release = timblogiw_close,
31617diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31618index e9c6a60..daf6a33 100644
31619--- a/drivers/message/fusion/mptbase.c
31620+++ b/drivers/message/fusion/mptbase.c
31621@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31622 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31623 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31624
31625+#ifdef CONFIG_GRKERNSEC_HIDESYM
31626+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31627+#else
31628 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31629 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31630+#endif
31631+
31632 /*
31633 * Rounding UP to nearest 4-kB boundary here...
31634 */
31635diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31636index 9d95042..b808101 100644
31637--- a/drivers/message/fusion/mptsas.c
31638+++ b/drivers/message/fusion/mptsas.c
31639@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31640 return 0;
31641 }
31642
31643+static inline void
31644+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31645+{
31646+ if (phy_info->port_details) {
31647+ phy_info->port_details->rphy = rphy;
31648+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31649+ ioc->name, rphy));
31650+ }
31651+
31652+ if (rphy) {
31653+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31654+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31655+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31656+ ioc->name, rphy, rphy->dev.release));
31657+ }
31658+}
31659+
31660 /* no mutex */
31661 static void
31662 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31663@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31664 return NULL;
31665 }
31666
31667-static inline void
31668-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31669-{
31670- if (phy_info->port_details) {
31671- phy_info->port_details->rphy = rphy;
31672- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31673- ioc->name, rphy));
31674- }
31675-
31676- if (rphy) {
31677- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31678- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31679- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31680- ioc->name, rphy, rphy->dev.release));
31681- }
31682-}
31683-
31684 static inline struct sas_port *
31685 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31686 {
31687diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31688index 0c3ced7..1fe34ec 100644
31689--- a/drivers/message/fusion/mptscsih.c
31690+++ b/drivers/message/fusion/mptscsih.c
31691@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31692
31693 h = shost_priv(SChost);
31694
31695- if (h) {
31696- if (h->info_kbuf == NULL)
31697- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31698- return h->info_kbuf;
31699- h->info_kbuf[0] = '\0';
31700+ if (!h)
31701+ return NULL;
31702
31703- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31704- h->info_kbuf[size-1] = '\0';
31705- }
31706+ if (h->info_kbuf == NULL)
31707+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31708+ return h->info_kbuf;
31709+ h->info_kbuf[0] = '\0';
31710+
31711+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31712+ h->info_kbuf[size-1] = '\0';
31713
31714 return h->info_kbuf;
31715 }
31716diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31717index 07dbeaf..5533142 100644
31718--- a/drivers/message/i2o/i2o_proc.c
31719+++ b/drivers/message/i2o/i2o_proc.c
31720@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31721 "Array Controller Device"
31722 };
31723
31724-static char *chtostr(u8 * chars, int n)
31725-{
31726- char tmp[256];
31727- tmp[0] = 0;
31728- return strncat(tmp, (char *)chars, n);
31729-}
31730-
31731 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31732 char *group)
31733 {
31734@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31735
31736 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31737 seq_printf(seq, "%-#8x", ddm_table.module_id);
31738- seq_printf(seq, "%-29s",
31739- chtostr(ddm_table.module_name_version, 28));
31740+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31741 seq_printf(seq, "%9d ", ddm_table.data_size);
31742 seq_printf(seq, "%8d", ddm_table.code_size);
31743
31744@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31745
31746 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31747 seq_printf(seq, "%-#8x", dst->module_id);
31748- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31749- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31750+ seq_printf(seq, "%-.28s", dst->module_name_version);
31751+ seq_printf(seq, "%-.8s", dst->date);
31752 seq_printf(seq, "%8d ", dst->module_size);
31753 seq_printf(seq, "%8d ", dst->mpb_size);
31754 seq_printf(seq, "0x%04x", dst->module_flags);
31755@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31756 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31757 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31758 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31759- seq_printf(seq, "Vendor info : %s\n",
31760- chtostr((u8 *) (work32 + 2), 16));
31761- seq_printf(seq, "Product info : %s\n",
31762- chtostr((u8 *) (work32 + 6), 16));
31763- seq_printf(seq, "Description : %s\n",
31764- chtostr((u8 *) (work32 + 10), 16));
31765- seq_printf(seq, "Product rev. : %s\n",
31766- chtostr((u8 *) (work32 + 14), 8));
31767+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31768+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31769+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31770+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31771
31772 seq_printf(seq, "Serial number : ");
31773 print_serial_number(seq, (u8 *) (work32 + 16),
31774@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31775 }
31776
31777 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31778- seq_printf(seq, "Module name : %s\n",
31779- chtostr(result.module_name, 24));
31780- seq_printf(seq, "Module revision : %s\n",
31781- chtostr(result.module_rev, 8));
31782+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31783+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31784
31785 seq_printf(seq, "Serial number : ");
31786 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31787@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31788 return 0;
31789 }
31790
31791- seq_printf(seq, "Device name : %s\n",
31792- chtostr(result.device_name, 64));
31793- seq_printf(seq, "Service name : %s\n",
31794- chtostr(result.service_name, 64));
31795- seq_printf(seq, "Physical name : %s\n",
31796- chtostr(result.physical_location, 64));
31797- seq_printf(seq, "Instance number : %s\n",
31798- chtostr(result.instance_number, 4));
31799+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31800+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31801+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31802+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31803
31804 return 0;
31805 }
31806diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31807index a8c08f3..155fe3d 100644
31808--- a/drivers/message/i2o/iop.c
31809+++ b/drivers/message/i2o/iop.c
31810@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31811
31812 spin_lock_irqsave(&c->context_list_lock, flags);
31813
31814- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31815- atomic_inc(&c->context_list_counter);
31816+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31817+ atomic_inc_unchecked(&c->context_list_counter);
31818
31819- entry->context = atomic_read(&c->context_list_counter);
31820+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31821
31822 list_add(&entry->list, &c->context_list);
31823
31824@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31825
31826 #if BITS_PER_LONG == 64
31827 spin_lock_init(&c->context_list_lock);
31828- atomic_set(&c->context_list_counter, 0);
31829+ atomic_set_unchecked(&c->context_list_counter, 0);
31830 INIT_LIST_HEAD(&c->context_list);
31831 #endif
31832
31833diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31834index 7ce65f4..e66e9bc 100644
31835--- a/drivers/mfd/abx500-core.c
31836+++ b/drivers/mfd/abx500-core.c
31837@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31838
31839 struct abx500_device_entry {
31840 struct list_head list;
31841- struct abx500_ops ops;
31842+ abx500_ops_no_const ops;
31843 struct device *dev;
31844 };
31845
31846diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31847index 5c2a06a..8fa077c 100644
31848--- a/drivers/mfd/janz-cmodio.c
31849+++ b/drivers/mfd/janz-cmodio.c
31850@@ -13,6 +13,7 @@
31851
31852 #include <linux/kernel.h>
31853 #include <linux/module.h>
31854+#include <linux/slab.h>
31855 #include <linux/init.h>
31856 #include <linux/pci.h>
31857 #include <linux/interrupt.h>
31858diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31859index 29d12a7..f900ba4 100644
31860--- a/drivers/misc/lis3lv02d/lis3lv02d.c
31861+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31862@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31863 * the lid is closed. This leads to interrupts as soon as a little move
31864 * is done.
31865 */
31866- atomic_inc(&lis3->count);
31867+ atomic_inc_unchecked(&lis3->count);
31868
31869 wake_up_interruptible(&lis3->misc_wait);
31870 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31871@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31872 if (lis3->pm_dev)
31873 pm_runtime_get_sync(lis3->pm_dev);
31874
31875- atomic_set(&lis3->count, 0);
31876+ atomic_set_unchecked(&lis3->count, 0);
31877 return 0;
31878 }
31879
31880@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
31881 add_wait_queue(&lis3->misc_wait, &wait);
31882 while (true) {
31883 set_current_state(TASK_INTERRUPTIBLE);
31884- data = atomic_xchg(&lis3->count, 0);
31885+ data = atomic_xchg_unchecked(&lis3->count, 0);
31886 if (data)
31887 break;
31888
31889@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
31890 struct lis3lv02d, miscdev);
31891
31892 poll_wait(file, &lis3->misc_wait, wait);
31893- if (atomic_read(&lis3->count))
31894+ if (atomic_read_unchecked(&lis3->count))
31895 return POLLIN | POLLRDNORM;
31896 return 0;
31897 }
31898diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
31899index 2b1482a..5d33616 100644
31900--- a/drivers/misc/lis3lv02d/lis3lv02d.h
31901+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
31902@@ -266,7 +266,7 @@ struct lis3lv02d {
31903 struct input_polled_dev *idev; /* input device */
31904 struct platform_device *pdev; /* platform device */
31905 struct regulator_bulk_data regulators[2];
31906- atomic_t count; /* interrupt count after last read */
31907+ atomic_unchecked_t count; /* interrupt count after last read */
31908 union axis_conversion ac; /* hw -> logical axis */
31909 int mapped_btns[3];
31910
31911diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
31912index 2f30bad..c4c13d0 100644
31913--- a/drivers/misc/sgi-gru/gruhandles.c
31914+++ b/drivers/misc/sgi-gru/gruhandles.c
31915@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31916 unsigned long nsec;
31917
31918 nsec = CLKS2NSEC(clks);
31919- atomic_long_inc(&mcs_op_statistics[op].count);
31920- atomic_long_add(nsec, &mcs_op_statistics[op].total);
31921+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31922+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
31923 if (mcs_op_statistics[op].max < nsec)
31924 mcs_op_statistics[op].max = nsec;
31925 }
31926diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
31927index 7768b87..f8aac38 100644
31928--- a/drivers/misc/sgi-gru/gruprocfs.c
31929+++ b/drivers/misc/sgi-gru/gruprocfs.c
31930@@ -32,9 +32,9 @@
31931
31932 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31933
31934-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31935+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31936 {
31937- unsigned long val = atomic_long_read(v);
31938+ unsigned long val = atomic_long_read_unchecked(v);
31939
31940 seq_printf(s, "%16lu %s\n", val, id);
31941 }
31942@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
31943
31944 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
31945 for (op = 0; op < mcsop_last; op++) {
31946- count = atomic_long_read(&mcs_op_statistics[op].count);
31947- total = atomic_long_read(&mcs_op_statistics[op].total);
31948+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31949+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31950 max = mcs_op_statistics[op].max;
31951 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31952 count ? total / count : 0, max);
31953diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
31954index 5c3ce24..4915ccb 100644
31955--- a/drivers/misc/sgi-gru/grutables.h
31956+++ b/drivers/misc/sgi-gru/grutables.h
31957@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
31958 * GRU statistics.
31959 */
31960 struct gru_stats_s {
31961- atomic_long_t vdata_alloc;
31962- atomic_long_t vdata_free;
31963- atomic_long_t gts_alloc;
31964- atomic_long_t gts_free;
31965- atomic_long_t gms_alloc;
31966- atomic_long_t gms_free;
31967- atomic_long_t gts_double_allocate;
31968- atomic_long_t assign_context;
31969- atomic_long_t assign_context_failed;
31970- atomic_long_t free_context;
31971- atomic_long_t load_user_context;
31972- atomic_long_t load_kernel_context;
31973- atomic_long_t lock_kernel_context;
31974- atomic_long_t unlock_kernel_context;
31975- atomic_long_t steal_user_context;
31976- atomic_long_t steal_kernel_context;
31977- atomic_long_t steal_context_failed;
31978- atomic_long_t nopfn;
31979- atomic_long_t asid_new;
31980- atomic_long_t asid_next;
31981- atomic_long_t asid_wrap;
31982- atomic_long_t asid_reuse;
31983- atomic_long_t intr;
31984- atomic_long_t intr_cbr;
31985- atomic_long_t intr_tfh;
31986- atomic_long_t intr_spurious;
31987- atomic_long_t intr_mm_lock_failed;
31988- atomic_long_t call_os;
31989- atomic_long_t call_os_wait_queue;
31990- atomic_long_t user_flush_tlb;
31991- atomic_long_t user_unload_context;
31992- atomic_long_t user_exception;
31993- atomic_long_t set_context_option;
31994- atomic_long_t check_context_retarget_intr;
31995- atomic_long_t check_context_unload;
31996- atomic_long_t tlb_dropin;
31997- atomic_long_t tlb_preload_page;
31998- atomic_long_t tlb_dropin_fail_no_asid;
31999- atomic_long_t tlb_dropin_fail_upm;
32000- atomic_long_t tlb_dropin_fail_invalid;
32001- atomic_long_t tlb_dropin_fail_range_active;
32002- atomic_long_t tlb_dropin_fail_idle;
32003- atomic_long_t tlb_dropin_fail_fmm;
32004- atomic_long_t tlb_dropin_fail_no_exception;
32005- atomic_long_t tfh_stale_on_fault;
32006- atomic_long_t mmu_invalidate_range;
32007- atomic_long_t mmu_invalidate_page;
32008- atomic_long_t flush_tlb;
32009- atomic_long_t flush_tlb_gru;
32010- atomic_long_t flush_tlb_gru_tgh;
32011- atomic_long_t flush_tlb_gru_zero_asid;
32012+ atomic_long_unchecked_t vdata_alloc;
32013+ atomic_long_unchecked_t vdata_free;
32014+ atomic_long_unchecked_t gts_alloc;
32015+ atomic_long_unchecked_t gts_free;
32016+ atomic_long_unchecked_t gms_alloc;
32017+ atomic_long_unchecked_t gms_free;
32018+ atomic_long_unchecked_t gts_double_allocate;
32019+ atomic_long_unchecked_t assign_context;
32020+ atomic_long_unchecked_t assign_context_failed;
32021+ atomic_long_unchecked_t free_context;
32022+ atomic_long_unchecked_t load_user_context;
32023+ atomic_long_unchecked_t load_kernel_context;
32024+ atomic_long_unchecked_t lock_kernel_context;
32025+ atomic_long_unchecked_t unlock_kernel_context;
32026+ atomic_long_unchecked_t steal_user_context;
32027+ atomic_long_unchecked_t steal_kernel_context;
32028+ atomic_long_unchecked_t steal_context_failed;
32029+ atomic_long_unchecked_t nopfn;
32030+ atomic_long_unchecked_t asid_new;
32031+ atomic_long_unchecked_t asid_next;
32032+ atomic_long_unchecked_t asid_wrap;
32033+ atomic_long_unchecked_t asid_reuse;
32034+ atomic_long_unchecked_t intr;
32035+ atomic_long_unchecked_t intr_cbr;
32036+ atomic_long_unchecked_t intr_tfh;
32037+ atomic_long_unchecked_t intr_spurious;
32038+ atomic_long_unchecked_t intr_mm_lock_failed;
32039+ atomic_long_unchecked_t call_os;
32040+ atomic_long_unchecked_t call_os_wait_queue;
32041+ atomic_long_unchecked_t user_flush_tlb;
32042+ atomic_long_unchecked_t user_unload_context;
32043+ atomic_long_unchecked_t user_exception;
32044+ atomic_long_unchecked_t set_context_option;
32045+ atomic_long_unchecked_t check_context_retarget_intr;
32046+ atomic_long_unchecked_t check_context_unload;
32047+ atomic_long_unchecked_t tlb_dropin;
32048+ atomic_long_unchecked_t tlb_preload_page;
32049+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32050+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32051+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32052+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32053+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32054+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32055+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32056+ atomic_long_unchecked_t tfh_stale_on_fault;
32057+ atomic_long_unchecked_t mmu_invalidate_range;
32058+ atomic_long_unchecked_t mmu_invalidate_page;
32059+ atomic_long_unchecked_t flush_tlb;
32060+ atomic_long_unchecked_t flush_tlb_gru;
32061+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32062+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32063
32064- atomic_long_t copy_gpa;
32065- atomic_long_t read_gpa;
32066+ atomic_long_unchecked_t copy_gpa;
32067+ atomic_long_unchecked_t read_gpa;
32068
32069- atomic_long_t mesq_receive;
32070- atomic_long_t mesq_receive_none;
32071- atomic_long_t mesq_send;
32072- atomic_long_t mesq_send_failed;
32073- atomic_long_t mesq_noop;
32074- atomic_long_t mesq_send_unexpected_error;
32075- atomic_long_t mesq_send_lb_overflow;
32076- atomic_long_t mesq_send_qlimit_reached;
32077- atomic_long_t mesq_send_amo_nacked;
32078- atomic_long_t mesq_send_put_nacked;
32079- atomic_long_t mesq_page_overflow;
32080- atomic_long_t mesq_qf_locked;
32081- atomic_long_t mesq_qf_noop_not_full;
32082- atomic_long_t mesq_qf_switch_head_failed;
32083- atomic_long_t mesq_qf_unexpected_error;
32084- atomic_long_t mesq_noop_unexpected_error;
32085- atomic_long_t mesq_noop_lb_overflow;
32086- atomic_long_t mesq_noop_qlimit_reached;
32087- atomic_long_t mesq_noop_amo_nacked;
32088- atomic_long_t mesq_noop_put_nacked;
32089- atomic_long_t mesq_noop_page_overflow;
32090+ atomic_long_unchecked_t mesq_receive;
32091+ atomic_long_unchecked_t mesq_receive_none;
32092+ atomic_long_unchecked_t mesq_send;
32093+ atomic_long_unchecked_t mesq_send_failed;
32094+ atomic_long_unchecked_t mesq_noop;
32095+ atomic_long_unchecked_t mesq_send_unexpected_error;
32096+ atomic_long_unchecked_t mesq_send_lb_overflow;
32097+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32098+ atomic_long_unchecked_t mesq_send_amo_nacked;
32099+ atomic_long_unchecked_t mesq_send_put_nacked;
32100+ atomic_long_unchecked_t mesq_page_overflow;
32101+ atomic_long_unchecked_t mesq_qf_locked;
32102+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32103+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32104+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32105+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32106+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32107+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32108+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32109+ atomic_long_unchecked_t mesq_noop_put_nacked;
32110+ atomic_long_unchecked_t mesq_noop_page_overflow;
32111
32112 };
32113
32114@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32115 tghop_invalidate, mcsop_last};
32116
32117 struct mcs_op_statistic {
32118- atomic_long_t count;
32119- atomic_long_t total;
32120+ atomic_long_unchecked_t count;
32121+ atomic_long_unchecked_t total;
32122 unsigned long max;
32123 };
32124
32125@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32126
32127 #define STAT(id) do { \
32128 if (gru_options & OPT_STATS) \
32129- atomic_long_inc(&gru_stats.id); \
32130+ atomic_long_inc_unchecked(&gru_stats.id); \
32131 } while (0)
32132
32133 #ifdef CONFIG_SGI_GRU_DEBUG
32134diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32135index 851b2f2..a4ec097 100644
32136--- a/drivers/misc/sgi-xp/xp.h
32137+++ b/drivers/misc/sgi-xp/xp.h
32138@@ -289,7 +289,7 @@ struct xpc_interface {
32139 xpc_notify_func, void *);
32140 void (*received) (short, int, void *);
32141 enum xp_retval (*partid_to_nasids) (short, void *);
32142-};
32143+} __no_const;
32144
32145 extern struct xpc_interface xpc_interface;
32146
32147diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32148index b94d5f7..7f494c5 100644
32149--- a/drivers/misc/sgi-xp/xpc.h
32150+++ b/drivers/misc/sgi-xp/xpc.h
32151@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32152 void (*received_payload) (struct xpc_channel *, void *);
32153 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32154 };
32155+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32156
32157 /* struct xpc_partition act_state values (for XPC HB) */
32158
32159@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32160 /* found in xpc_main.c */
32161 extern struct device *xpc_part;
32162 extern struct device *xpc_chan;
32163-extern struct xpc_arch_operations xpc_arch_ops;
32164+extern xpc_arch_operations_no_const xpc_arch_ops;
32165 extern int xpc_disengage_timelimit;
32166 extern int xpc_disengage_timedout;
32167 extern int xpc_activate_IRQ_rcvd;
32168diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32169index 8d082b4..aa749ae 100644
32170--- a/drivers/misc/sgi-xp/xpc_main.c
32171+++ b/drivers/misc/sgi-xp/xpc_main.c
32172@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32173 .notifier_call = xpc_system_die,
32174 };
32175
32176-struct xpc_arch_operations xpc_arch_ops;
32177+xpc_arch_operations_no_const xpc_arch_ops;
32178
32179 /*
32180 * Timer function to enforce the timelimit on the partition disengage.
32181diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32182index 6878a94..fe5c5f1 100644
32183--- a/drivers/mmc/host/sdhci-pci.c
32184+++ b/drivers/mmc/host/sdhci-pci.c
32185@@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32186 .probe = via_probe,
32187 };
32188
32189-static const struct pci_device_id pci_ids[] __devinitdata = {
32190+static const struct pci_device_id pci_ids[] __devinitconst = {
32191 {
32192 .vendor = PCI_VENDOR_ID_RICOH,
32193 .device = PCI_DEVICE_ID_RICOH_R5C822,
32194diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32195index e9fad91..0a7a16a 100644
32196--- a/drivers/mtd/devices/doc2000.c
32197+++ b/drivers/mtd/devices/doc2000.c
32198@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32199
32200 /* The ECC will not be calculated correctly if less than 512 is written */
32201 /* DBB-
32202- if (len != 0x200 && eccbuf)
32203+ if (len != 0x200)
32204 printk(KERN_WARNING
32205 "ECC needs a full sector write (adr: %lx size %lx)\n",
32206 (long) to, (long) len);
32207diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32208index a3f7a27..234016e 100644
32209--- a/drivers/mtd/devices/doc2001.c
32210+++ b/drivers/mtd/devices/doc2001.c
32211@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32212 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32213
32214 /* Don't allow read past end of device */
32215- if (from >= this->totlen)
32216+ if (from >= this->totlen || !len)
32217 return -EINVAL;
32218
32219 /* Don't allow a single read to cross a 512-byte block boundary */
32220diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32221index 3984d48..28aa897 100644
32222--- a/drivers/mtd/nand/denali.c
32223+++ b/drivers/mtd/nand/denali.c
32224@@ -26,6 +26,7 @@
32225 #include <linux/pci.h>
32226 #include <linux/mtd/mtd.h>
32227 #include <linux/module.h>
32228+#include <linux/slab.h>
32229
32230 #include "denali.h"
32231
32232diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32233index ac40925..483b753 100644
32234--- a/drivers/mtd/nftlmount.c
32235+++ b/drivers/mtd/nftlmount.c
32236@@ -24,6 +24,7 @@
32237 #include <asm/errno.h>
32238 #include <linux/delay.h>
32239 #include <linux/slab.h>
32240+#include <linux/sched.h>
32241 #include <linux/mtd/mtd.h>
32242 #include <linux/mtd/nand.h>
32243 #include <linux/mtd/nftl.h>
32244diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32245index 6c3fb5a..c542a81 100644
32246--- a/drivers/mtd/ubi/build.c
32247+++ b/drivers/mtd/ubi/build.c
32248@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32249 static int __init bytes_str_to_int(const char *str)
32250 {
32251 char *endp;
32252- unsigned long result;
32253+ unsigned long result, scale = 1;
32254
32255 result = simple_strtoul(str, &endp, 0);
32256 if (str == endp || result >= INT_MAX) {
32257@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32258
32259 switch (*endp) {
32260 case 'G':
32261- result *= 1024;
32262+ scale *= 1024;
32263 case 'M':
32264- result *= 1024;
32265+ scale *= 1024;
32266 case 'K':
32267- result *= 1024;
32268+ scale *= 1024;
32269 if (endp[1] == 'i' && endp[2] == 'B')
32270 endp += 2;
32271 case '\0':
32272@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32273 return -EINVAL;
32274 }
32275
32276- return result;
32277+ if ((intoverflow_t)result*scale >= INT_MAX) {
32278+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32279+ str);
32280+ return -EINVAL;
32281+ }
32282+
32283+ return result*scale;
32284 }
32285
32286 /**
32287diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32288index 1feae59..c2a61d2 100644
32289--- a/drivers/net/ethernet/atheros/atlx/atl2.c
32290+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32291@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32292 */
32293
32294 #define ATL2_PARAM(X, desc) \
32295- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32296+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32297 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32298 MODULE_PARM_DESC(X, desc);
32299 #else
32300diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32301index 9a517c2..a50cfcb 100644
32302--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32303+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32304@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32305
32306 int (*wait_comp)(struct bnx2x *bp,
32307 struct bnx2x_rx_mode_ramrod_params *p);
32308-};
32309+} __no_const;
32310
32311 /********************** Set multicast group ***********************************/
32312
32313diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32314index 94b4bd0..73c02de 100644
32315--- a/drivers/net/ethernet/broadcom/tg3.h
32316+++ b/drivers/net/ethernet/broadcom/tg3.h
32317@@ -134,6 +134,7 @@
32318 #define CHIPREV_ID_5750_A0 0x4000
32319 #define CHIPREV_ID_5750_A1 0x4001
32320 #define CHIPREV_ID_5750_A3 0x4003
32321+#define CHIPREV_ID_5750_C1 0x4201
32322 #define CHIPREV_ID_5750_C2 0x4202
32323 #define CHIPREV_ID_5752_A0_HW 0x5000
32324 #define CHIPREV_ID_5752_A0 0x6000
32325diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32326index c5f5479..2e8c260 100644
32327--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32328+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32329@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32330 */
32331 struct l2t_skb_cb {
32332 arp_failure_handler_func arp_failure_handler;
32333-};
32334+} __no_const;
32335
32336 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32337
32338diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32339index 871bcaa..4043505 100644
32340--- a/drivers/net/ethernet/dec/tulip/de4x5.c
32341+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32342@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32343 for (i=0; i<ETH_ALEN; i++) {
32344 tmp.addr[i] = dev->dev_addr[i];
32345 }
32346- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32347+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32348 break;
32349
32350 case DE4X5_SET_HWADDR: /* Set the hardware address */
32351@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32352 spin_lock_irqsave(&lp->lock, flags);
32353 memcpy(&statbuf, &lp->pktStats, ioc->len);
32354 spin_unlock_irqrestore(&lp->lock, flags);
32355- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32356+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32357 return -EFAULT;
32358 break;
32359 }
32360diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32361index 14d5b61..1398636 100644
32362--- a/drivers/net/ethernet/dec/tulip/eeprom.c
32363+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32364@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32365 {NULL}};
32366
32367
32368-static const char *block_name[] __devinitdata = {
32369+static const char *block_name[] __devinitconst = {
32370 "21140 non-MII",
32371 "21140 MII PHY",
32372 "21142 Serial PHY",
32373diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32374index 4d01219..b58d26d 100644
32375--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32376+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32377@@ -236,7 +236,7 @@ struct pci_id_info {
32378 int drv_flags; /* Driver use, intended as capability flags. */
32379 };
32380
32381-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32382+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32383 { /* Sometime a Level-One switch card. */
32384 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32385 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32386diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32387index dcd7f7a..ecb7fb3 100644
32388--- a/drivers/net/ethernet/dlink/sundance.c
32389+++ b/drivers/net/ethernet/dlink/sundance.c
32390@@ -218,7 +218,7 @@ enum {
32391 struct pci_id_info {
32392 const char *name;
32393 };
32394-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32395+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32396 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32397 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32398 {"D-Link DFE-580TX 4 port Server Adapter"},
32399diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32400index bf266a0..e024af7 100644
32401--- a/drivers/net/ethernet/emulex/benet/be_main.c
32402+++ b/drivers/net/ethernet/emulex/benet/be_main.c
32403@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32404
32405 if (wrapped)
32406 newacc += 65536;
32407- ACCESS_ONCE(*acc) = newacc;
32408+ ACCESS_ONCE_RW(*acc) = newacc;
32409 }
32410
32411 void be_parse_stats(struct be_adapter *adapter)
32412diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32413index 61d2bdd..7f1154a 100644
32414--- a/drivers/net/ethernet/fealnx.c
32415+++ b/drivers/net/ethernet/fealnx.c
32416@@ -150,7 +150,7 @@ struct chip_info {
32417 int flags;
32418 };
32419
32420-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32421+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32422 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32423 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32424 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32425diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32426index e1159e5..e18684d 100644
32427--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32428+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32429@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32430 {
32431 struct e1000_hw *hw = &adapter->hw;
32432 struct e1000_mac_info *mac = &hw->mac;
32433- struct e1000_mac_operations *func = &mac->ops;
32434+ e1000_mac_operations_no_const *func = &mac->ops;
32435
32436 /* Set media type */
32437 switch (adapter->pdev->device) {
32438diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32439index a3e65fd..f451444 100644
32440--- a/drivers/net/ethernet/intel/e1000e/82571.c
32441+++ b/drivers/net/ethernet/intel/e1000e/82571.c
32442@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32443 {
32444 struct e1000_hw *hw = &adapter->hw;
32445 struct e1000_mac_info *mac = &hw->mac;
32446- struct e1000_mac_operations *func = &mac->ops;
32447+ e1000_mac_operations_no_const *func = &mac->ops;
32448 u32 swsm = 0;
32449 u32 swsm2 = 0;
32450 bool force_clear_smbi = false;
32451diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32452index 2967039..ca8c40c 100644
32453--- a/drivers/net/ethernet/intel/e1000e/hw.h
32454+++ b/drivers/net/ethernet/intel/e1000e/hw.h
32455@@ -778,6 +778,7 @@ struct e1000_mac_operations {
32456 void (*write_vfta)(struct e1000_hw *, u32, u32);
32457 s32 (*read_mac_addr)(struct e1000_hw *);
32458 };
32459+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32460
32461 /*
32462 * When to use various PHY register access functions:
32463@@ -818,6 +819,7 @@ struct e1000_phy_operations {
32464 void (*power_up)(struct e1000_hw *);
32465 void (*power_down)(struct e1000_hw *);
32466 };
32467+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32468
32469 /* Function pointers for the NVM. */
32470 struct e1000_nvm_operations {
32471@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32472 s32 (*validate)(struct e1000_hw *);
32473 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32474 };
32475+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32476
32477 struct e1000_mac_info {
32478- struct e1000_mac_operations ops;
32479+ e1000_mac_operations_no_const ops;
32480 u8 addr[ETH_ALEN];
32481 u8 perm_addr[ETH_ALEN];
32482
32483@@ -872,7 +875,7 @@ struct e1000_mac_info {
32484 };
32485
32486 struct e1000_phy_info {
32487- struct e1000_phy_operations ops;
32488+ e1000_phy_operations_no_const ops;
32489
32490 enum e1000_phy_type type;
32491
32492@@ -906,7 +909,7 @@ struct e1000_phy_info {
32493 };
32494
32495 struct e1000_nvm_info {
32496- struct e1000_nvm_operations ops;
32497+ e1000_nvm_operations_no_const ops;
32498
32499 enum e1000_nvm_type type;
32500 enum e1000_nvm_override override;
32501diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32502index 4519a13..f97fcd0 100644
32503--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32504+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32505@@ -314,6 +314,7 @@ struct e1000_mac_operations {
32506 s32 (*read_mac_addr)(struct e1000_hw *);
32507 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32508 };
32509+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32510
32511 struct e1000_phy_operations {
32512 s32 (*acquire)(struct e1000_hw *);
32513@@ -330,6 +331,7 @@ struct e1000_phy_operations {
32514 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32515 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32516 };
32517+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32518
32519 struct e1000_nvm_operations {
32520 s32 (*acquire)(struct e1000_hw *);
32521@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32522 s32 (*update)(struct e1000_hw *);
32523 s32 (*validate)(struct e1000_hw *);
32524 };
32525+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32526
32527 struct e1000_info {
32528 s32 (*get_invariants)(struct e1000_hw *);
32529@@ -350,7 +353,7 @@ struct e1000_info {
32530 extern const struct e1000_info e1000_82575_info;
32531
32532 struct e1000_mac_info {
32533- struct e1000_mac_operations ops;
32534+ e1000_mac_operations_no_const ops;
32535
32536 u8 addr[6];
32537 u8 perm_addr[6];
32538@@ -388,7 +391,7 @@ struct e1000_mac_info {
32539 };
32540
32541 struct e1000_phy_info {
32542- struct e1000_phy_operations ops;
32543+ e1000_phy_operations_no_const ops;
32544
32545 enum e1000_phy_type type;
32546
32547@@ -423,7 +426,7 @@ struct e1000_phy_info {
32548 };
32549
32550 struct e1000_nvm_info {
32551- struct e1000_nvm_operations ops;
32552+ e1000_nvm_operations_no_const ops;
32553 enum e1000_nvm_type type;
32554 enum e1000_nvm_override override;
32555
32556@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32557 s32 (*check_for_ack)(struct e1000_hw *, u16);
32558 s32 (*check_for_rst)(struct e1000_hw *, u16);
32559 };
32560+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32561
32562 struct e1000_mbx_stats {
32563 u32 msgs_tx;
32564@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32565 };
32566
32567 struct e1000_mbx_info {
32568- struct e1000_mbx_operations ops;
32569+ e1000_mbx_operations_no_const ops;
32570 struct e1000_mbx_stats stats;
32571 u32 timeout;
32572 u32 usec_delay;
32573diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32574index d7ed58f..64cde36 100644
32575--- a/drivers/net/ethernet/intel/igbvf/vf.h
32576+++ b/drivers/net/ethernet/intel/igbvf/vf.h
32577@@ -189,9 +189,10 @@ struct e1000_mac_operations {
32578 s32 (*read_mac_addr)(struct e1000_hw *);
32579 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32580 };
32581+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32582
32583 struct e1000_mac_info {
32584- struct e1000_mac_operations ops;
32585+ e1000_mac_operations_no_const ops;
32586 u8 addr[6];
32587 u8 perm_addr[6];
32588
32589@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32590 s32 (*check_for_ack)(struct e1000_hw *);
32591 s32 (*check_for_rst)(struct e1000_hw *);
32592 };
32593+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32594
32595 struct e1000_mbx_stats {
32596 u32 msgs_tx;
32597@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32598 };
32599
32600 struct e1000_mbx_info {
32601- struct e1000_mbx_operations ops;
32602+ e1000_mbx_operations_no_const ops;
32603 struct e1000_mbx_stats stats;
32604 u32 timeout;
32605 u32 usec_delay;
32606diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32607index 6c5cca8..de8ef63 100644
32608--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32609+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32610@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32611 s32 (*update_checksum)(struct ixgbe_hw *);
32612 u16 (*calc_checksum)(struct ixgbe_hw *);
32613 };
32614+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32615
32616 struct ixgbe_mac_operations {
32617 s32 (*init_hw)(struct ixgbe_hw *);
32618@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32619 /* Manageability interface */
32620 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32621 };
32622+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32623
32624 struct ixgbe_phy_operations {
32625 s32 (*identify)(struct ixgbe_hw *);
32626@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32627 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32628 s32 (*check_overtemp)(struct ixgbe_hw *);
32629 };
32630+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32631
32632 struct ixgbe_eeprom_info {
32633- struct ixgbe_eeprom_operations ops;
32634+ ixgbe_eeprom_operations_no_const ops;
32635 enum ixgbe_eeprom_type type;
32636 u32 semaphore_delay;
32637 u16 word_size;
32638@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32639
32640 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32641 struct ixgbe_mac_info {
32642- struct ixgbe_mac_operations ops;
32643+ ixgbe_mac_operations_no_const ops;
32644 enum ixgbe_mac_type type;
32645 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32646 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32647@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32648 };
32649
32650 struct ixgbe_phy_info {
32651- struct ixgbe_phy_operations ops;
32652+ ixgbe_phy_operations_no_const ops;
32653 struct mdio_if_info mdio;
32654 enum ixgbe_phy_type type;
32655 u32 id;
32656@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32657 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32658 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32659 };
32660+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32661
32662 struct ixgbe_mbx_stats {
32663 u32 msgs_tx;
32664@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32665 };
32666
32667 struct ixgbe_mbx_info {
32668- struct ixgbe_mbx_operations ops;
32669+ ixgbe_mbx_operations_no_const ops;
32670 struct ixgbe_mbx_stats stats;
32671 u32 timeout;
32672 u32 usec_delay;
32673diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32674index 10306b4..28df758 100644
32675--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32676+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32677@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32678 s32 (*clear_vfta)(struct ixgbe_hw *);
32679 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32680 };
32681+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32682
32683 enum ixgbe_mac_type {
32684 ixgbe_mac_unknown = 0,
32685@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32686 };
32687
32688 struct ixgbe_mac_info {
32689- struct ixgbe_mac_operations ops;
32690+ ixgbe_mac_operations_no_const ops;
32691 u8 addr[6];
32692 u8 perm_addr[6];
32693
32694@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32695 s32 (*check_for_ack)(struct ixgbe_hw *);
32696 s32 (*check_for_rst)(struct ixgbe_hw *);
32697 };
32698+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32699
32700 struct ixgbe_mbx_stats {
32701 u32 msgs_tx;
32702@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32703 };
32704
32705 struct ixgbe_mbx_info {
32706- struct ixgbe_mbx_operations ops;
32707+ ixgbe_mbx_operations_no_const ops;
32708 struct ixgbe_mbx_stats stats;
32709 u32 timeout;
32710 u32 udelay;
32711diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32712index 94bbc85..78c12e6 100644
32713--- a/drivers/net/ethernet/mellanox/mlx4/main.c
32714+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32715@@ -40,6 +40,7 @@
32716 #include <linux/dma-mapping.h>
32717 #include <linux/slab.h>
32718 #include <linux/io-mapping.h>
32719+#include <linux/sched.h>
32720
32721 #include <linux/mlx4/device.h>
32722 #include <linux/mlx4/doorbell.h>
32723diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32724index 5046a64..71ca936 100644
32725--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32726+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32727@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32728 void (*link_down)(struct __vxge_hw_device *devh);
32729 void (*crit_err)(struct __vxge_hw_device *devh,
32730 enum vxge_hw_event type, u64 ext_data);
32731-};
32732+} __no_const;
32733
32734 /*
32735 * struct __vxge_hw_blockpool_entry - Block private data structure
32736diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32737index 4a518a3..936b334 100644
32738--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32739+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32740@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32741 struct vxge_hw_mempool_dma *dma_object,
32742 u32 index,
32743 u32 is_last);
32744-};
32745+} __no_const;
32746
32747 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32748 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32749diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32750index c8f47f1..5da9840 100644
32751--- a/drivers/net/ethernet/realtek/r8169.c
32752+++ b/drivers/net/ethernet/realtek/r8169.c
32753@@ -698,17 +698,17 @@ struct rtl8169_private {
32754 struct mdio_ops {
32755 void (*write)(void __iomem *, int, int);
32756 int (*read)(void __iomem *, int);
32757- } mdio_ops;
32758+ } __no_const mdio_ops;
32759
32760 struct pll_power_ops {
32761 void (*down)(struct rtl8169_private *);
32762 void (*up)(struct rtl8169_private *);
32763- } pll_power_ops;
32764+ } __no_const pll_power_ops;
32765
32766 struct jumbo_ops {
32767 void (*enable)(struct rtl8169_private *);
32768 void (*disable)(struct rtl8169_private *);
32769- } jumbo_ops;
32770+ } __no_const jumbo_ops;
32771
32772 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32773 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32774diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32775index 1b4658c..a30dabb 100644
32776--- a/drivers/net/ethernet/sis/sis190.c
32777+++ b/drivers/net/ethernet/sis/sis190.c
32778@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32779 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32780 struct net_device *dev)
32781 {
32782- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32783+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32784 struct sis190_private *tp = netdev_priv(dev);
32785 struct pci_dev *isa_bridge;
32786 u8 reg, tmp8;
32787diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32788index edfa15d..002bfa9 100644
32789--- a/drivers/net/ppp/ppp_generic.c
32790+++ b/drivers/net/ppp/ppp_generic.c
32791@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32792 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32793 struct ppp_stats stats;
32794 struct ppp_comp_stats cstats;
32795- char *vers;
32796
32797 switch (cmd) {
32798 case SIOCGPPPSTATS:
32799@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32800 break;
32801
32802 case SIOCGPPPVER:
32803- vers = PPP_VERSION;
32804- if (copy_to_user(addr, vers, strlen(vers) + 1))
32805+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32806 break;
32807 err = 0;
32808 break;
32809diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32810index 515f122..41dd273 100644
32811--- a/drivers/net/tokenring/abyss.c
32812+++ b/drivers/net/tokenring/abyss.c
32813@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32814
32815 static int __init abyss_init (void)
32816 {
32817- abyss_netdev_ops = tms380tr_netdev_ops;
32818+ pax_open_kernel();
32819+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32820
32821- abyss_netdev_ops.ndo_open = abyss_open;
32822- abyss_netdev_ops.ndo_stop = abyss_close;
32823+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32824+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32825+ pax_close_kernel();
32826
32827 return pci_register_driver(&abyss_driver);
32828 }
32829diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32830index 6153cfd..cf69c1c 100644
32831--- a/drivers/net/tokenring/madgemc.c
32832+++ b/drivers/net/tokenring/madgemc.c
32833@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32834
32835 static int __init madgemc_init (void)
32836 {
32837- madgemc_netdev_ops = tms380tr_netdev_ops;
32838- madgemc_netdev_ops.ndo_open = madgemc_open;
32839- madgemc_netdev_ops.ndo_stop = madgemc_close;
32840+ pax_open_kernel();
32841+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32842+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32843+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32844+ pax_close_kernel();
32845
32846 return mca_register_driver (&madgemc_driver);
32847 }
32848diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32849index 8d362e6..f91cc52 100644
32850--- a/drivers/net/tokenring/proteon.c
32851+++ b/drivers/net/tokenring/proteon.c
32852@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32853 struct platform_device *pdev;
32854 int i, num = 0, err = 0;
32855
32856- proteon_netdev_ops = tms380tr_netdev_ops;
32857- proteon_netdev_ops.ndo_open = proteon_open;
32858- proteon_netdev_ops.ndo_stop = tms380tr_close;
32859+ pax_open_kernel();
32860+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32861+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32862+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32863+ pax_close_kernel();
32864
32865 err = platform_driver_register(&proteon_driver);
32866 if (err)
32867diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32868index 46db5c5..37c1536 100644
32869--- a/drivers/net/tokenring/skisa.c
32870+++ b/drivers/net/tokenring/skisa.c
32871@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32872 struct platform_device *pdev;
32873 int i, num = 0, err = 0;
32874
32875- sk_isa_netdev_ops = tms380tr_netdev_ops;
32876- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32877- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32878+ pax_open_kernel();
32879+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32880+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32881+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32882+ pax_close_kernel();
32883
32884 err = platform_driver_register(&sk_isa_driver);
32885 if (err)
32886diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
32887index 304fe78..db112fa 100644
32888--- a/drivers/net/usb/hso.c
32889+++ b/drivers/net/usb/hso.c
32890@@ -71,7 +71,7 @@
32891 #include <asm/byteorder.h>
32892 #include <linux/serial_core.h>
32893 #include <linux/serial.h>
32894-
32895+#include <asm/local.h>
32896
32897 #define MOD_AUTHOR "Option Wireless"
32898 #define MOD_DESCRIPTION "USB High Speed Option driver"
32899@@ -257,7 +257,7 @@ struct hso_serial {
32900
32901 /* from usb_serial_port */
32902 struct tty_struct *tty;
32903- int open_count;
32904+ local_t open_count;
32905 spinlock_t serial_lock;
32906
32907 int (*write_data) (struct hso_serial *serial);
32908@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
32909 struct urb *urb;
32910
32911 urb = serial->rx_urb[0];
32912- if (serial->open_count > 0) {
32913+ if (local_read(&serial->open_count) > 0) {
32914 count = put_rxbuf_data(urb, serial);
32915 if (count == -1)
32916 return;
32917@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
32918 DUMP1(urb->transfer_buffer, urb->actual_length);
32919
32920 /* Anyone listening? */
32921- if (serial->open_count == 0)
32922+ if (local_read(&serial->open_count) == 0)
32923 return;
32924
32925 if (status == 0) {
32926@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
32927 spin_unlock_irq(&serial->serial_lock);
32928
32929 /* check for port already opened, if not set the termios */
32930- serial->open_count++;
32931- if (serial->open_count == 1) {
32932+ if (local_inc_return(&serial->open_count) == 1) {
32933 serial->rx_state = RX_IDLE;
32934 /* Force default termio settings */
32935 _hso_serial_set_termios(tty, NULL);
32936@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
32937 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32938 if (result) {
32939 hso_stop_serial_device(serial->parent);
32940- serial->open_count--;
32941+ local_dec(&serial->open_count);
32942 kref_put(&serial->parent->ref, hso_serial_ref_free);
32943 }
32944 } else {
32945@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
32946
32947 /* reset the rts and dtr */
32948 /* do the actual close */
32949- serial->open_count--;
32950+ local_dec(&serial->open_count);
32951
32952- if (serial->open_count <= 0) {
32953- serial->open_count = 0;
32954+ if (local_read(&serial->open_count) <= 0) {
32955+ local_set(&serial->open_count, 0);
32956 spin_lock_irq(&serial->serial_lock);
32957 if (serial->tty == tty) {
32958 serial->tty->driver_data = NULL;
32959@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
32960
32961 /* the actual setup */
32962 spin_lock_irqsave(&serial->serial_lock, flags);
32963- if (serial->open_count)
32964+ if (local_read(&serial->open_count))
32965 _hso_serial_set_termios(tty, old);
32966 else
32967 tty->termios = old;
32968@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
32969 D1("Pending read interrupt on port %d\n", i);
32970 spin_lock(&serial->serial_lock);
32971 if (serial->rx_state == RX_IDLE &&
32972- serial->open_count > 0) {
32973+ local_read(&serial->open_count) > 0) {
32974 /* Setup and send a ctrl req read on
32975 * port i */
32976 if (!serial->rx_urb_filled[0]) {
32977@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
32978 /* Start all serial ports */
32979 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32980 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32981- if (dev2ser(serial_table[i])->open_count) {
32982+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32983 result =
32984 hso_start_serial_device(serial_table[i], GFP_NOIO);
32985 hso_kick_transmit(dev2ser(serial_table[i]));
32986diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
32987index e662cbc..8d4a102 100644
32988--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
32989+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
32990@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
32991 * Return with error code if any of the queue indices
32992 * is out of range
32993 */
32994- if (p->ring_index[i] < 0 ||
32995- p->ring_index[i] >= adapter->num_rx_queues)
32996+ if (p->ring_index[i] >= adapter->num_rx_queues)
32997 return -EINVAL;
32998 }
32999
33000diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33001index 0f9ee46..e2d6e65 100644
33002--- a/drivers/net/wireless/ath/ath.h
33003+++ b/drivers/net/wireless/ath/ath.h
33004@@ -119,6 +119,7 @@ struct ath_ops {
33005 void (*write_flush) (void *);
33006 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33007 };
33008+typedef struct ath_ops __no_const ath_ops_no_const;
33009
33010 struct ath_common;
33011 struct ath_bus_ops;
33012diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33013index b592016..fe47870 100644
33014--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33015+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33016@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33017 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33018 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33019
33020- ACCESS_ONCE(ads->ds_link) = i->link;
33021- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33022+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
33023+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33024
33025 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33026 ctl6 = SM(i->keytype, AR_EncrType);
33027@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33028
33029 if ((i->is_first || i->is_last) &&
33030 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33031- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33032+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33033 | set11nTries(i->rates, 1)
33034 | set11nTries(i->rates, 2)
33035 | set11nTries(i->rates, 3)
33036 | (i->dur_update ? AR_DurUpdateEna : 0)
33037 | SM(0, AR_BurstDur);
33038
33039- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33040+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33041 | set11nRate(i->rates, 1)
33042 | set11nRate(i->rates, 2)
33043 | set11nRate(i->rates, 3);
33044 } else {
33045- ACCESS_ONCE(ads->ds_ctl2) = 0;
33046- ACCESS_ONCE(ads->ds_ctl3) = 0;
33047+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33048+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33049 }
33050
33051 if (!i->is_first) {
33052- ACCESS_ONCE(ads->ds_ctl0) = 0;
33053- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33054- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33055+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33056+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33057+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33058 return;
33059 }
33060
33061@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33062 break;
33063 }
33064
33065- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33066+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33067 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33068 | SM(i->txpower, AR_XmitPower)
33069 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33070@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33071 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33072 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33073
33074- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33075- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33076+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33077+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33078
33079 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33080 return;
33081
33082- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33083+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33084 | set11nPktDurRTSCTS(i->rates, 1);
33085
33086- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33087+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33088 | set11nPktDurRTSCTS(i->rates, 3);
33089
33090- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33091+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33092 | set11nRateFlags(i->rates, 1)
33093 | set11nRateFlags(i->rates, 2)
33094 | set11nRateFlags(i->rates, 3)
33095diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33096index f5ae3c6..7936af3 100644
33097--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33098+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33099@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33100 (i->qcu << AR_TxQcuNum_S) | 0x17;
33101
33102 checksum += val;
33103- ACCESS_ONCE(ads->info) = val;
33104+ ACCESS_ONCE_RW(ads->info) = val;
33105
33106 checksum += i->link;
33107- ACCESS_ONCE(ads->link) = i->link;
33108+ ACCESS_ONCE_RW(ads->link) = i->link;
33109
33110 checksum += i->buf_addr[0];
33111- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33112+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33113 checksum += i->buf_addr[1];
33114- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33115+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33116 checksum += i->buf_addr[2];
33117- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33118+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33119 checksum += i->buf_addr[3];
33120- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33121+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33122
33123 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33124- ACCESS_ONCE(ads->ctl3) = val;
33125+ ACCESS_ONCE_RW(ads->ctl3) = val;
33126 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33127- ACCESS_ONCE(ads->ctl5) = val;
33128+ ACCESS_ONCE_RW(ads->ctl5) = val;
33129 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33130- ACCESS_ONCE(ads->ctl7) = val;
33131+ ACCESS_ONCE_RW(ads->ctl7) = val;
33132 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33133- ACCESS_ONCE(ads->ctl9) = val;
33134+ ACCESS_ONCE_RW(ads->ctl9) = val;
33135
33136 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33137- ACCESS_ONCE(ads->ctl10) = checksum;
33138+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
33139
33140 if (i->is_first || i->is_last) {
33141- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33142+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33143 | set11nTries(i->rates, 1)
33144 | set11nTries(i->rates, 2)
33145 | set11nTries(i->rates, 3)
33146 | (i->dur_update ? AR_DurUpdateEna : 0)
33147 | SM(0, AR_BurstDur);
33148
33149- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33150+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33151 | set11nRate(i->rates, 1)
33152 | set11nRate(i->rates, 2)
33153 | set11nRate(i->rates, 3);
33154 } else {
33155- ACCESS_ONCE(ads->ctl13) = 0;
33156- ACCESS_ONCE(ads->ctl14) = 0;
33157+ ACCESS_ONCE_RW(ads->ctl13) = 0;
33158+ ACCESS_ONCE_RW(ads->ctl14) = 0;
33159 }
33160
33161 ads->ctl20 = 0;
33162@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33163
33164 ctl17 = SM(i->keytype, AR_EncrType);
33165 if (!i->is_first) {
33166- ACCESS_ONCE(ads->ctl11) = 0;
33167- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33168- ACCESS_ONCE(ads->ctl15) = 0;
33169- ACCESS_ONCE(ads->ctl16) = 0;
33170- ACCESS_ONCE(ads->ctl17) = ctl17;
33171- ACCESS_ONCE(ads->ctl18) = 0;
33172- ACCESS_ONCE(ads->ctl19) = 0;
33173+ ACCESS_ONCE_RW(ads->ctl11) = 0;
33174+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33175+ ACCESS_ONCE_RW(ads->ctl15) = 0;
33176+ ACCESS_ONCE_RW(ads->ctl16) = 0;
33177+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33178+ ACCESS_ONCE_RW(ads->ctl18) = 0;
33179+ ACCESS_ONCE_RW(ads->ctl19) = 0;
33180 return;
33181 }
33182
33183- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33184+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33185 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33186 | SM(i->txpower, AR_XmitPower)
33187 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33188@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33189 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33190 ctl12 |= SM(val, AR_PAPRDChainMask);
33191
33192- ACCESS_ONCE(ads->ctl12) = ctl12;
33193- ACCESS_ONCE(ads->ctl17) = ctl17;
33194+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33195+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33196
33197- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33198+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33199 | set11nPktDurRTSCTS(i->rates, 1);
33200
33201- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33202+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33203 | set11nPktDurRTSCTS(i->rates, 3);
33204
33205- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33206+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33207 | set11nRateFlags(i->rates, 1)
33208 | set11nRateFlags(i->rates, 2)
33209 | set11nRateFlags(i->rates, 3)
33210 | SM(i->rtscts_rate, AR_RTSCTSRate);
33211
33212- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33213+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33214 }
33215
33216 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33217diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33218index f389b3c..7359e18 100644
33219--- a/drivers/net/wireless/ath/ath9k/hw.h
33220+++ b/drivers/net/wireless/ath/ath9k/hw.h
33221@@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33222
33223 /* ANI */
33224 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33225-};
33226+} __no_const;
33227
33228 /**
33229 * struct ath_hw_ops - callbacks used by hardware code and driver code
33230@@ -635,7 +635,7 @@ struct ath_hw_ops {
33231 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33232 struct ath_hw_antcomb_conf *antconf);
33233
33234-};
33235+} __no_const;
33236
33237 struct ath_nf_limits {
33238 s16 max;
33239@@ -655,7 +655,7 @@ enum ath_cal_list {
33240 #define AH_FASTCC 0x4
33241
33242 struct ath_hw {
33243- struct ath_ops reg_ops;
33244+ ath_ops_no_const reg_ops;
33245
33246 struct ieee80211_hw *hw;
33247 struct ath_common common;
33248diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33249index bea8524..c677c06 100644
33250--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33251+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33252@@ -547,7 +547,7 @@ struct phy_func_ptr {
33253 void (*carrsuppr)(struct brcms_phy *);
33254 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33255 void (*detach)(struct brcms_phy *);
33256-};
33257+} __no_const;
33258
33259 struct brcms_phy {
33260 struct brcms_phy_pub pubpi_ro;
33261diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33262index 05f2ad1..ae00eea 100644
33263--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33264+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33265@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33266 */
33267 if (iwl3945_mod_params.disable_hw_scan) {
33268 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33269- iwl3945_hw_ops.hw_scan = NULL;
33270+ pax_open_kernel();
33271+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33272+ pax_close_kernel();
33273 }
33274
33275 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33276diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33277index 69a77e2..552b42c 100644
33278--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33279+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33280@@ -71,8 +71,8 @@ do { \
33281 } while (0)
33282
33283 #else
33284-#define IWL_DEBUG(m, level, fmt, args...)
33285-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33286+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33287+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33288 #define iwl_print_hex_dump(m, level, p, len)
33289 #endif /* CONFIG_IWLWIFI_DEBUG */
33290
33291diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33292index 523ad55..f8c5dc5 100644
33293--- a/drivers/net/wireless/mac80211_hwsim.c
33294+++ b/drivers/net/wireless/mac80211_hwsim.c
33295@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33296 return -EINVAL;
33297
33298 if (fake_hw_scan) {
33299- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33300- mac80211_hwsim_ops.sw_scan_start = NULL;
33301- mac80211_hwsim_ops.sw_scan_complete = NULL;
33302+ pax_open_kernel();
33303+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33304+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33305+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33306+ pax_close_kernel();
33307 }
33308
33309 spin_lock_init(&hwsim_radio_lock);
33310diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33311index 30f138b..c904585 100644
33312--- a/drivers/net/wireless/mwifiex/main.h
33313+++ b/drivers/net/wireless/mwifiex/main.h
33314@@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33315 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33316 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33317 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33318-};
33319+} __no_const;
33320
33321 struct mwifiex_adapter {
33322 u8 iface_type;
33323diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33324index 0c13840..a5c3ed6 100644
33325--- a/drivers/net/wireless/rndis_wlan.c
33326+++ b/drivers/net/wireless/rndis_wlan.c
33327@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33328
33329 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33330
33331- if (rts_threshold < 0 || rts_threshold > 2347)
33332+ if (rts_threshold > 2347)
33333 rts_threshold = 2347;
33334
33335 tmp = cpu_to_le32(rts_threshold);
33336diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33337index a77f1bb..c608b2b 100644
33338--- a/drivers/net/wireless/wl1251/wl1251.h
33339+++ b/drivers/net/wireless/wl1251/wl1251.h
33340@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33341 void (*reset)(struct wl1251 *wl);
33342 void (*enable_irq)(struct wl1251 *wl);
33343 void (*disable_irq)(struct wl1251 *wl);
33344-};
33345+} __no_const;
33346
33347 struct wl1251 {
33348 struct ieee80211_hw *hw;
33349diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33350index f34b5b2..b5abb9f 100644
33351--- a/drivers/oprofile/buffer_sync.c
33352+++ b/drivers/oprofile/buffer_sync.c
33353@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33354 if (cookie == NO_COOKIE)
33355 offset = pc;
33356 if (cookie == INVALID_COOKIE) {
33357- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33358+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33359 offset = pc;
33360 }
33361 if (cookie != last_cookie) {
33362@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33363 /* add userspace sample */
33364
33365 if (!mm) {
33366- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33367+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33368 return 0;
33369 }
33370
33371 cookie = lookup_dcookie(mm, s->eip, &offset);
33372
33373 if (cookie == INVALID_COOKIE) {
33374- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33375+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33376 return 0;
33377 }
33378
33379@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33380 /* ignore backtraces if failed to add a sample */
33381 if (state == sb_bt_start) {
33382 state = sb_bt_ignore;
33383- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33384+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33385 }
33386 }
33387 release_mm(mm);
33388diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33389index c0cc4e7..44d4e54 100644
33390--- a/drivers/oprofile/event_buffer.c
33391+++ b/drivers/oprofile/event_buffer.c
33392@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33393 }
33394
33395 if (buffer_pos == buffer_size) {
33396- atomic_inc(&oprofile_stats.event_lost_overflow);
33397+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33398 return;
33399 }
33400
33401diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33402index f8c752e..28bf4fc 100644
33403--- a/drivers/oprofile/oprof.c
33404+++ b/drivers/oprofile/oprof.c
33405@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33406 if (oprofile_ops.switch_events())
33407 return;
33408
33409- atomic_inc(&oprofile_stats.multiplex_counter);
33410+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33411 start_switch_worker();
33412 }
33413
33414diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33415index 917d28e..d62d981 100644
33416--- a/drivers/oprofile/oprofile_stats.c
33417+++ b/drivers/oprofile/oprofile_stats.c
33418@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33419 cpu_buf->sample_invalid_eip = 0;
33420 }
33421
33422- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33423- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33424- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33425- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33426- atomic_set(&oprofile_stats.multiplex_counter, 0);
33427+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33428+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33429+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33430+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33431+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33432 }
33433
33434
33435diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33436index 38b6fc0..b5cbfce 100644
33437--- a/drivers/oprofile/oprofile_stats.h
33438+++ b/drivers/oprofile/oprofile_stats.h
33439@@ -13,11 +13,11 @@
33440 #include <linux/atomic.h>
33441
33442 struct oprofile_stat_struct {
33443- atomic_t sample_lost_no_mm;
33444- atomic_t sample_lost_no_mapping;
33445- atomic_t bt_lost_no_mapping;
33446- atomic_t event_lost_overflow;
33447- atomic_t multiplex_counter;
33448+ atomic_unchecked_t sample_lost_no_mm;
33449+ atomic_unchecked_t sample_lost_no_mapping;
33450+ atomic_unchecked_t bt_lost_no_mapping;
33451+ atomic_unchecked_t event_lost_overflow;
33452+ atomic_unchecked_t multiplex_counter;
33453 };
33454
33455 extern struct oprofile_stat_struct oprofile_stats;
33456diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33457index 2f0aa0f..90fab02 100644
33458--- a/drivers/oprofile/oprofilefs.c
33459+++ b/drivers/oprofile/oprofilefs.c
33460@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33461
33462
33463 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33464- char const *name, atomic_t *val)
33465+ char const *name, atomic_unchecked_t *val)
33466 {
33467 return __oprofilefs_create_file(sb, root, name,
33468 &atomic_ro_fops, 0444, val);
33469diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33470index 3f56bc0..707d642 100644
33471--- a/drivers/parport/procfs.c
33472+++ b/drivers/parport/procfs.c
33473@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33474
33475 *ppos += len;
33476
33477- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33478+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33479 }
33480
33481 #ifdef CONFIG_PARPORT_1284
33482@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33483
33484 *ppos += len;
33485
33486- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33487+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33488 }
33489 #endif /* IEEE1284.3 support. */
33490
33491diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33492index 9fff878..ad0ad53 100644
33493--- a/drivers/pci/hotplug/cpci_hotplug.h
33494+++ b/drivers/pci/hotplug/cpci_hotplug.h
33495@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33496 int (*hardware_test) (struct slot* slot, u32 value);
33497 u8 (*get_power) (struct slot* slot);
33498 int (*set_power) (struct slot* slot, int value);
33499-};
33500+} __no_const;
33501
33502 struct cpci_hp_controller {
33503 unsigned int irq;
33504diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33505index 76ba8a1..20ca857 100644
33506--- a/drivers/pci/hotplug/cpqphp_nvram.c
33507+++ b/drivers/pci/hotplug/cpqphp_nvram.c
33508@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33509
33510 void compaq_nvram_init (void __iomem *rom_start)
33511 {
33512+
33513+#ifndef CONFIG_PAX_KERNEXEC
33514 if (rom_start) {
33515 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33516 }
33517+#endif
33518+
33519 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33520
33521 /* initialize our int15 lock */
33522diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33523index cbfbab1..6a9fced 100644
33524--- a/drivers/pci/pcie/aspm.c
33525+++ b/drivers/pci/pcie/aspm.c
33526@@ -27,9 +27,9 @@
33527 #define MODULE_PARAM_PREFIX "pcie_aspm."
33528
33529 /* Note: those are not register definitions */
33530-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33531-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33532-#define ASPM_STATE_L1 (4) /* L1 state */
33533+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33534+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33535+#define ASPM_STATE_L1 (4U) /* L1 state */
33536 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33537 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33538
33539diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33540index 04e74f4..a960176 100644
33541--- a/drivers/pci/probe.c
33542+++ b/drivers/pci/probe.c
33543@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33544 u32 l, sz, mask;
33545 u16 orig_cmd;
33546
33547- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33548+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33549
33550 if (!dev->mmio_always_on) {
33551 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33552diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33553index 27911b5..5b6db88 100644
33554--- a/drivers/pci/proc.c
33555+++ b/drivers/pci/proc.c
33556@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33557 static int __init pci_proc_init(void)
33558 {
33559 struct pci_dev *dev = NULL;
33560+
33561+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33562+#ifdef CONFIG_GRKERNSEC_PROC_USER
33563+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33564+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33565+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33566+#endif
33567+#else
33568 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33569+#endif
33570 proc_create("devices", 0, proc_bus_pci_dir,
33571 &proc_bus_pci_dev_operations);
33572 proc_initialized = 1;
33573diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33574index 7b82868..b9344c9 100644
33575--- a/drivers/platform/x86/thinkpad_acpi.c
33576+++ b/drivers/platform/x86/thinkpad_acpi.c
33577@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33578 return 0;
33579 }
33580
33581-void static hotkey_mask_warn_incomplete_mask(void)
33582+static void hotkey_mask_warn_incomplete_mask(void)
33583 {
33584 /* log only what the user can fix... */
33585 const u32 wantedmask = hotkey_driver_mask &
33586@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33587 }
33588 }
33589
33590-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33591- struct tp_nvram_state *newn,
33592- const u32 event_mask)
33593-{
33594-
33595 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33596 do { \
33597 if ((event_mask & (1 << __scancode)) && \
33598@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33599 tpacpi_hotkey_send_key(__scancode); \
33600 } while (0)
33601
33602- void issue_volchange(const unsigned int oldvol,
33603- const unsigned int newvol)
33604- {
33605- unsigned int i = oldvol;
33606+static void issue_volchange(const unsigned int oldvol,
33607+ const unsigned int newvol,
33608+ const u32 event_mask)
33609+{
33610+ unsigned int i = oldvol;
33611
33612- while (i > newvol) {
33613- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33614- i--;
33615- }
33616- while (i < newvol) {
33617- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33618- i++;
33619- }
33620+ while (i > newvol) {
33621+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33622+ i--;
33623 }
33624+ while (i < newvol) {
33625+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33626+ i++;
33627+ }
33628+}
33629
33630- void issue_brightnesschange(const unsigned int oldbrt,
33631- const unsigned int newbrt)
33632- {
33633- unsigned int i = oldbrt;
33634+static void issue_brightnesschange(const unsigned int oldbrt,
33635+ const unsigned int newbrt,
33636+ const u32 event_mask)
33637+{
33638+ unsigned int i = oldbrt;
33639
33640- while (i > newbrt) {
33641- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33642- i--;
33643- }
33644- while (i < newbrt) {
33645- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33646- i++;
33647- }
33648+ while (i > newbrt) {
33649+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33650+ i--;
33651+ }
33652+ while (i < newbrt) {
33653+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33654+ i++;
33655 }
33656+}
33657
33658+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33659+ struct tp_nvram_state *newn,
33660+ const u32 event_mask)
33661+{
33662 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33663 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33664 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33665@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33666 oldn->volume_level != newn->volume_level) {
33667 /* recently muted, or repeated mute keypress, or
33668 * multiple presses ending in mute */
33669- issue_volchange(oldn->volume_level, newn->volume_level);
33670+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33671 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33672 }
33673 } else {
33674@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33675 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33676 }
33677 if (oldn->volume_level != newn->volume_level) {
33678- issue_volchange(oldn->volume_level, newn->volume_level);
33679+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33680 } else if (oldn->volume_toggle != newn->volume_toggle) {
33681 /* repeated vol up/down keypress at end of scale ? */
33682 if (newn->volume_level == 0)
33683@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33684 /* handle brightness */
33685 if (oldn->brightness_level != newn->brightness_level) {
33686 issue_brightnesschange(oldn->brightness_level,
33687- newn->brightness_level);
33688+ newn->brightness_level,
33689+ event_mask);
33690 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33691 /* repeated key presses that didn't change state */
33692 if (newn->brightness_level == 0)
33693@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33694 && !tp_features.bright_unkfw)
33695 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33696 }
33697+}
33698
33699 #undef TPACPI_COMPARE_KEY
33700 #undef TPACPI_MAY_SEND_KEY
33701-}
33702
33703 /*
33704 * Polling driver
33705diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33706index b859d16..5cc6b1a 100644
33707--- a/drivers/pnp/pnpbios/bioscalls.c
33708+++ b/drivers/pnp/pnpbios/bioscalls.c
33709@@ -59,7 +59,7 @@ do { \
33710 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33711 } while(0)
33712
33713-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33714+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33715 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33716
33717 /*
33718@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33719
33720 cpu = get_cpu();
33721 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33722+
33723+ pax_open_kernel();
33724 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33725+ pax_close_kernel();
33726
33727 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33728 spin_lock_irqsave(&pnp_bios_lock, flags);
33729@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33730 :"memory");
33731 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33732
33733+ pax_open_kernel();
33734 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33735+ pax_close_kernel();
33736+
33737 put_cpu();
33738
33739 /* If we get here and this is set then the PnP BIOS faulted on us. */
33740@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33741 return status;
33742 }
33743
33744-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33745+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33746 {
33747 int i;
33748
33749@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33750 pnp_bios_callpoint.offset = header->fields.pm16offset;
33751 pnp_bios_callpoint.segment = PNP_CS16;
33752
33753+ pax_open_kernel();
33754+
33755 for_each_possible_cpu(i) {
33756 struct desc_struct *gdt = get_cpu_gdt_table(i);
33757 if (!gdt)
33758@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33759 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33760 (unsigned long)__va(header->fields.pm16dseg));
33761 }
33762+
33763+ pax_close_kernel();
33764 }
33765diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33766index b0ecacb..7c9da2e 100644
33767--- a/drivers/pnp/resource.c
33768+++ b/drivers/pnp/resource.c
33769@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33770 return 1;
33771
33772 /* check if the resource is valid */
33773- if (*irq < 0 || *irq > 15)
33774+ if (*irq > 15)
33775 return 0;
33776
33777 /* check if the resource is reserved */
33778@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33779 return 1;
33780
33781 /* check if the resource is valid */
33782- if (*dma < 0 || *dma == 4 || *dma > 7)
33783+ if (*dma == 4 || *dma > 7)
33784 return 0;
33785
33786 /* check if the resource is reserved */
33787diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33788index bb16f5b..c751eef 100644
33789--- a/drivers/power/bq27x00_battery.c
33790+++ b/drivers/power/bq27x00_battery.c
33791@@ -67,7 +67,7 @@
33792 struct bq27x00_device_info;
33793 struct bq27x00_access_methods {
33794 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33795-};
33796+} __no_const;
33797
33798 enum bq27x00_chip { BQ27000, BQ27500 };
33799
33800diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33801index 33f5d9a..d957d3f 100644
33802--- a/drivers/regulator/max8660.c
33803+++ b/drivers/regulator/max8660.c
33804@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33805 max8660->shadow_regs[MAX8660_OVER1] = 5;
33806 } else {
33807 /* Otherwise devices can be toggled via software */
33808- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33809- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33810+ pax_open_kernel();
33811+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33812+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33813+ pax_close_kernel();
33814 }
33815
33816 /*
33817diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33818index 023d17d..74ef35b 100644
33819--- a/drivers/regulator/mc13892-regulator.c
33820+++ b/drivers/regulator/mc13892-regulator.c
33821@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33822 }
33823 mc13xxx_unlock(mc13892);
33824
33825- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33826+ pax_open_kernel();
33827+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33828 = mc13892_vcam_set_mode;
33829- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33830+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33831 = mc13892_vcam_get_mode;
33832+ pax_close_kernel();
33833 for (i = 0; i < pdata->num_regulators; i++) {
33834 init_data = &pdata->regulators[i];
33835 priv->regulators[i] = regulator_register(
33836diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33837index cace6d3..f623fda 100644
33838--- a/drivers/rtc/rtc-dev.c
33839+++ b/drivers/rtc/rtc-dev.c
33840@@ -14,6 +14,7 @@
33841 #include <linux/module.h>
33842 #include <linux/rtc.h>
33843 #include <linux/sched.h>
33844+#include <linux/grsecurity.h>
33845 #include "rtc-core.h"
33846
33847 static dev_t rtc_devt;
33848@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33849 if (copy_from_user(&tm, uarg, sizeof(tm)))
33850 return -EFAULT;
33851
33852+ gr_log_timechange();
33853+
33854 return rtc_set_time(rtc, &tm);
33855
33856 case RTC_PIE_ON:
33857diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33858index ffb5878..e6d785c 100644
33859--- a/drivers/scsi/aacraid/aacraid.h
33860+++ b/drivers/scsi/aacraid/aacraid.h
33861@@ -492,7 +492,7 @@ struct adapter_ops
33862 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33863 /* Administrative operations */
33864 int (*adapter_comm)(struct aac_dev * dev, int comm);
33865-};
33866+} __no_const;
33867
33868 /*
33869 * Define which interrupt handler needs to be installed
33870diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33871index 705e13e..91c873c 100644
33872--- a/drivers/scsi/aacraid/linit.c
33873+++ b/drivers/scsi/aacraid/linit.c
33874@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33875 #elif defined(__devinitconst)
33876 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33877 #else
33878-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33879+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33880 #endif
33881 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33882 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33883diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
33884index d5ff142..49c0ebb 100644
33885--- a/drivers/scsi/aic94xx/aic94xx_init.c
33886+++ b/drivers/scsi/aic94xx/aic94xx_init.c
33887@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
33888 .lldd_control_phy = asd_control_phy,
33889 };
33890
33891-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33892+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33893 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33894 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33895 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33896diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
33897index a796de9..1ef20e1 100644
33898--- a/drivers/scsi/bfa/bfa.h
33899+++ b/drivers/scsi/bfa/bfa.h
33900@@ -196,7 +196,7 @@ struct bfa_hwif_s {
33901 u32 *end);
33902 int cpe_vec_q0;
33903 int rme_vec_q0;
33904-};
33905+} __no_const;
33906 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33907
33908 struct bfa_faa_cbfn_s {
33909diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
33910index e07bd47..cd1bbbb 100644
33911--- a/drivers/scsi/bfa/bfa_fcpim.c
33912+++ b/drivers/scsi/bfa/bfa_fcpim.c
33913@@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
33914
33915 bfa_iotag_attach(fcp);
33916
33917- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
33918+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
33919 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
33920 (fcp->num_itns * sizeof(struct bfa_itn_s));
33921 memset(fcp->itn_arr, 0,
33922@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33923 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
33924 {
33925 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
33926- struct bfa_itn_s *itn;
33927+ bfa_itn_s_no_const *itn;
33928
33929 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
33930 itn->isr = isr;
33931diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
33932index 1080bcb..a3b39e3 100644
33933--- a/drivers/scsi/bfa/bfa_fcpim.h
33934+++ b/drivers/scsi/bfa/bfa_fcpim.h
33935@@ -37,6 +37,7 @@ struct bfa_iotag_s {
33936 struct bfa_itn_s {
33937 bfa_isr_func_t isr;
33938 };
33939+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
33940
33941 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33942 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
33943@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
33944 struct list_head iotag_tio_free_q; /* free IO resources */
33945 struct list_head iotag_unused_q; /* unused IO resources*/
33946 struct bfa_iotag_s *iotag_arr;
33947- struct bfa_itn_s *itn_arr;
33948+ bfa_itn_s_no_const *itn_arr;
33949 int num_ioim_reqs;
33950 int num_fwtio_reqs;
33951 int num_itns;
33952diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
33953index 546d46b..642fa5b 100644
33954--- a/drivers/scsi/bfa/bfa_ioc.h
33955+++ b/drivers/scsi/bfa/bfa_ioc.h
33956@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
33957 bfa_ioc_disable_cbfn_t disable_cbfn;
33958 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33959 bfa_ioc_reset_cbfn_t reset_cbfn;
33960-};
33961+} __no_const;
33962
33963 /*
33964 * IOC event notification mechanism.
33965@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
33966 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33967 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33968 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
33969-};
33970+} __no_const;
33971
33972 /*
33973 * Queue element to wait for room in request queue. FIFO order is
33974diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
33975index 351dc0b..951dc32 100644
33976--- a/drivers/scsi/hosts.c
33977+++ b/drivers/scsi/hosts.c
33978@@ -42,7 +42,7 @@
33979 #include "scsi_logging.h"
33980
33981
33982-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33983+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33984
33985
33986 static void scsi_host_cls_release(struct device *dev)
33987@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
33988 * subtract one because we increment first then return, but we need to
33989 * know what the next host number was before increment
33990 */
33991- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33992+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33993 shost->dma_channel = 0xff;
33994
33995 /* These three are default values which can be overridden */
33996diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
33997index 865d452..e9b7fa7 100644
33998--- a/drivers/scsi/hpsa.c
33999+++ b/drivers/scsi/hpsa.c
34000@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34001 u32 a;
34002
34003 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34004- return h->access.command_completed(h);
34005+ return h->access->command_completed(h);
34006
34007 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34008 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34009@@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34010 while (!list_empty(&h->reqQ)) {
34011 c = list_entry(h->reqQ.next, struct CommandList, list);
34012 /* can't do anything if fifo is full */
34013- if ((h->access.fifo_full(h))) {
34014+ if ((h->access->fifo_full(h))) {
34015 dev_warn(&h->pdev->dev, "fifo full\n");
34016 break;
34017 }
34018@@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34019 h->Qdepth--;
34020
34021 /* Tell the controller execute command */
34022- h->access.submit_command(h, c);
34023+ h->access->submit_command(h, c);
34024
34025 /* Put job onto the completed Q */
34026 addQ(&h->cmpQ, c);
34027@@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34028
34029 static inline unsigned long get_next_completion(struct ctlr_info *h)
34030 {
34031- return h->access.command_completed(h);
34032+ return h->access->command_completed(h);
34033 }
34034
34035 static inline bool interrupt_pending(struct ctlr_info *h)
34036 {
34037- return h->access.intr_pending(h);
34038+ return h->access->intr_pending(h);
34039 }
34040
34041 static inline long interrupt_not_for_us(struct ctlr_info *h)
34042 {
34043- return (h->access.intr_pending(h) == 0) ||
34044+ return (h->access->intr_pending(h) == 0) ||
34045 (h->interrupts_enabled == 0);
34046 }
34047
34048@@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34049 if (prod_index < 0)
34050 return -ENODEV;
34051 h->product_name = products[prod_index].product_name;
34052- h->access = *(products[prod_index].access);
34053+ h->access = products[prod_index].access;
34054
34055 if (hpsa_board_disabled(h->pdev)) {
34056 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34057@@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34058
34059 assert_spin_locked(&lockup_detector_lock);
34060 remove_ctlr_from_lockup_detector_list(h);
34061- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34062+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34063 spin_lock_irqsave(&h->lock, flags);
34064 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34065 spin_unlock_irqrestore(&h->lock, flags);
34066@@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34067 }
34068
34069 /* make sure the board interrupts are off */
34070- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34071+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34072
34073 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34074 goto clean2;
34075@@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34076 * fake ones to scoop up any residual completions.
34077 */
34078 spin_lock_irqsave(&h->lock, flags);
34079- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34080+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34081 spin_unlock_irqrestore(&h->lock, flags);
34082 free_irq(h->intr[h->intr_mode], h);
34083 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34084@@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34085 dev_info(&h->pdev->dev, "Board READY.\n");
34086 dev_info(&h->pdev->dev,
34087 "Waiting for stale completions to drain.\n");
34088- h->access.set_intr_mask(h, HPSA_INTR_ON);
34089+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34090 msleep(10000);
34091- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34092+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34093
34094 rc = controller_reset_failed(h->cfgtable);
34095 if (rc)
34096@@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34097 }
34098
34099 /* Turn the interrupts on so we can service requests */
34100- h->access.set_intr_mask(h, HPSA_INTR_ON);
34101+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34102
34103 hpsa_hba_inquiry(h);
34104 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34105@@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34106 * To write all data in the battery backed cache to disks
34107 */
34108 hpsa_flush_cache(h);
34109- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34110+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34111 free_irq(h->intr[h->intr_mode], h);
34112 #ifdef CONFIG_PCI_MSI
34113 if (h->msix_vector)
34114@@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34115 return;
34116 }
34117 /* Change the access methods to the performant access methods */
34118- h->access = SA5_performant_access;
34119+ h->access = &SA5_performant_access;
34120 h->transMethod = CFGTBL_Trans_Performant;
34121 }
34122
34123diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34124index 91edafb..a9b88ec 100644
34125--- a/drivers/scsi/hpsa.h
34126+++ b/drivers/scsi/hpsa.h
34127@@ -73,7 +73,7 @@ struct ctlr_info {
34128 unsigned int msix_vector;
34129 unsigned int msi_vector;
34130 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34131- struct access_method access;
34132+ struct access_method *access;
34133
34134 /* queue and queue Info */
34135 struct list_head reqQ;
34136diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34137index f2df059..a3a9930 100644
34138--- a/drivers/scsi/ips.h
34139+++ b/drivers/scsi/ips.h
34140@@ -1027,7 +1027,7 @@ typedef struct {
34141 int (*intr)(struct ips_ha *);
34142 void (*enableint)(struct ips_ha *);
34143 uint32_t (*statupd)(struct ips_ha *);
34144-} ips_hw_func_t;
34145+} __no_const ips_hw_func_t;
34146
34147 typedef struct ips_ha {
34148 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34149diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34150index 9de9db2..1e09660 100644
34151--- a/drivers/scsi/libfc/fc_exch.c
34152+++ b/drivers/scsi/libfc/fc_exch.c
34153@@ -105,12 +105,12 @@ struct fc_exch_mgr {
34154 * all together if not used XXX
34155 */
34156 struct {
34157- atomic_t no_free_exch;
34158- atomic_t no_free_exch_xid;
34159- atomic_t xid_not_found;
34160- atomic_t xid_busy;
34161- atomic_t seq_not_found;
34162- atomic_t non_bls_resp;
34163+ atomic_unchecked_t no_free_exch;
34164+ atomic_unchecked_t no_free_exch_xid;
34165+ atomic_unchecked_t xid_not_found;
34166+ atomic_unchecked_t xid_busy;
34167+ atomic_unchecked_t seq_not_found;
34168+ atomic_unchecked_t non_bls_resp;
34169 } stats;
34170 };
34171
34172@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34173 /* allocate memory for exchange */
34174 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34175 if (!ep) {
34176- atomic_inc(&mp->stats.no_free_exch);
34177+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34178 goto out;
34179 }
34180 memset(ep, 0, sizeof(*ep));
34181@@ -780,7 +780,7 @@ out:
34182 return ep;
34183 err:
34184 spin_unlock_bh(&pool->lock);
34185- atomic_inc(&mp->stats.no_free_exch_xid);
34186+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34187 mempool_free(ep, mp->ep_pool);
34188 return NULL;
34189 }
34190@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34191 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34192 ep = fc_exch_find(mp, xid);
34193 if (!ep) {
34194- atomic_inc(&mp->stats.xid_not_found);
34195+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34196 reject = FC_RJT_OX_ID;
34197 goto out;
34198 }
34199@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34200 ep = fc_exch_find(mp, xid);
34201 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34202 if (ep) {
34203- atomic_inc(&mp->stats.xid_busy);
34204+ atomic_inc_unchecked(&mp->stats.xid_busy);
34205 reject = FC_RJT_RX_ID;
34206 goto rel;
34207 }
34208@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34209 }
34210 xid = ep->xid; /* get our XID */
34211 } else if (!ep) {
34212- atomic_inc(&mp->stats.xid_not_found);
34213+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34214 reject = FC_RJT_RX_ID; /* XID not found */
34215 goto out;
34216 }
34217@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34218 } else {
34219 sp = &ep->seq;
34220 if (sp->id != fh->fh_seq_id) {
34221- atomic_inc(&mp->stats.seq_not_found);
34222+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34223 if (f_ctl & FC_FC_END_SEQ) {
34224 /*
34225 * Update sequence_id based on incoming last
34226@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34227
34228 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34229 if (!ep) {
34230- atomic_inc(&mp->stats.xid_not_found);
34231+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34232 goto out;
34233 }
34234 if (ep->esb_stat & ESB_ST_COMPLETE) {
34235- atomic_inc(&mp->stats.xid_not_found);
34236+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34237 goto rel;
34238 }
34239 if (ep->rxid == FC_XID_UNKNOWN)
34240 ep->rxid = ntohs(fh->fh_rx_id);
34241 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34242- atomic_inc(&mp->stats.xid_not_found);
34243+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34244 goto rel;
34245 }
34246 if (ep->did != ntoh24(fh->fh_s_id) &&
34247 ep->did != FC_FID_FLOGI) {
34248- atomic_inc(&mp->stats.xid_not_found);
34249+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34250 goto rel;
34251 }
34252 sof = fr_sof(fp);
34253@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34254 sp->ssb_stat |= SSB_ST_RESP;
34255 sp->id = fh->fh_seq_id;
34256 } else if (sp->id != fh->fh_seq_id) {
34257- atomic_inc(&mp->stats.seq_not_found);
34258+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34259 goto rel;
34260 }
34261
34262@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34263 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34264
34265 if (!sp)
34266- atomic_inc(&mp->stats.xid_not_found);
34267+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34268 else
34269- atomic_inc(&mp->stats.non_bls_resp);
34270+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34271
34272 fc_frame_free(fp);
34273 }
34274diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34275index db9238f..4378ed2 100644
34276--- a/drivers/scsi/libsas/sas_ata.c
34277+++ b/drivers/scsi/libsas/sas_ata.c
34278@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34279 .postreset = ata_std_postreset,
34280 .error_handler = ata_std_error_handler,
34281 .post_internal_cmd = sas_ata_post_internal,
34282- .qc_defer = ata_std_qc_defer,
34283+ .qc_defer = ata_std_qc_defer,
34284 .qc_prep = ata_noop_qc_prep,
34285 .qc_issue = sas_ata_qc_issue,
34286 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34287diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34288index bb4c8e0..f33d849 100644
34289--- a/drivers/scsi/lpfc/lpfc.h
34290+++ b/drivers/scsi/lpfc/lpfc.h
34291@@ -425,7 +425,7 @@ struct lpfc_vport {
34292 struct dentry *debug_nodelist;
34293 struct dentry *vport_debugfs_root;
34294 struct lpfc_debugfs_trc *disc_trc;
34295- atomic_t disc_trc_cnt;
34296+ atomic_unchecked_t disc_trc_cnt;
34297 #endif
34298 uint8_t stat_data_enabled;
34299 uint8_t stat_data_blocked;
34300@@ -835,8 +835,8 @@ struct lpfc_hba {
34301 struct timer_list fabric_block_timer;
34302 unsigned long bit_flags;
34303 #define FABRIC_COMANDS_BLOCKED 0
34304- atomic_t num_rsrc_err;
34305- atomic_t num_cmd_success;
34306+ atomic_unchecked_t num_rsrc_err;
34307+ atomic_unchecked_t num_cmd_success;
34308 unsigned long last_rsrc_error_time;
34309 unsigned long last_ramp_down_time;
34310 unsigned long last_ramp_up_time;
34311@@ -866,7 +866,7 @@ struct lpfc_hba {
34312
34313 struct dentry *debug_slow_ring_trc;
34314 struct lpfc_debugfs_trc *slow_ring_trc;
34315- atomic_t slow_ring_trc_cnt;
34316+ atomic_unchecked_t slow_ring_trc_cnt;
34317 /* iDiag debugfs sub-directory */
34318 struct dentry *idiag_root;
34319 struct dentry *idiag_pci_cfg;
34320diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34321index 2838259..a07cfb5 100644
34322--- a/drivers/scsi/lpfc/lpfc_debugfs.c
34323+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34324@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34325
34326 #include <linux/debugfs.h>
34327
34328-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34329+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34330 static unsigned long lpfc_debugfs_start_time = 0L;
34331
34332 /* iDiag */
34333@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34334 lpfc_debugfs_enable = 0;
34335
34336 len = 0;
34337- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34338+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34339 (lpfc_debugfs_max_disc_trc - 1);
34340 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34341 dtp = vport->disc_trc + i;
34342@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34343 lpfc_debugfs_enable = 0;
34344
34345 len = 0;
34346- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34347+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34348 (lpfc_debugfs_max_slow_ring_trc - 1);
34349 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34350 dtp = phba->slow_ring_trc + i;
34351@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34352 !vport || !vport->disc_trc)
34353 return;
34354
34355- index = atomic_inc_return(&vport->disc_trc_cnt) &
34356+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34357 (lpfc_debugfs_max_disc_trc - 1);
34358 dtp = vport->disc_trc + index;
34359 dtp->fmt = fmt;
34360 dtp->data1 = data1;
34361 dtp->data2 = data2;
34362 dtp->data3 = data3;
34363- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34364+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34365 dtp->jif = jiffies;
34366 #endif
34367 return;
34368@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34369 !phba || !phba->slow_ring_trc)
34370 return;
34371
34372- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34373+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34374 (lpfc_debugfs_max_slow_ring_trc - 1);
34375 dtp = phba->slow_ring_trc + index;
34376 dtp->fmt = fmt;
34377 dtp->data1 = data1;
34378 dtp->data2 = data2;
34379 dtp->data3 = data3;
34380- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34381+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34382 dtp->jif = jiffies;
34383 #endif
34384 return;
34385@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34386 "slow_ring buffer\n");
34387 goto debug_failed;
34388 }
34389- atomic_set(&phba->slow_ring_trc_cnt, 0);
34390+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34391 memset(phba->slow_ring_trc, 0,
34392 (sizeof(struct lpfc_debugfs_trc) *
34393 lpfc_debugfs_max_slow_ring_trc));
34394@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34395 "buffer\n");
34396 goto debug_failed;
34397 }
34398- atomic_set(&vport->disc_trc_cnt, 0);
34399+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34400
34401 snprintf(name, sizeof(name), "discovery_trace");
34402 vport->debug_disc_trc =
34403diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34404index 55bc4fc..a2a109c 100644
34405--- a/drivers/scsi/lpfc/lpfc_init.c
34406+++ b/drivers/scsi/lpfc/lpfc_init.c
34407@@ -10027,8 +10027,10 @@ lpfc_init(void)
34408 printk(LPFC_COPYRIGHT "\n");
34409
34410 if (lpfc_enable_npiv) {
34411- lpfc_transport_functions.vport_create = lpfc_vport_create;
34412- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34413+ pax_open_kernel();
34414+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34415+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34416+ pax_close_kernel();
34417 }
34418 lpfc_transport_template =
34419 fc_attach_transport(&lpfc_transport_functions);
34420diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34421index 2e1e54e..1af0a0d 100644
34422--- a/drivers/scsi/lpfc/lpfc_scsi.c
34423+++ b/drivers/scsi/lpfc/lpfc_scsi.c
34424@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34425 uint32_t evt_posted;
34426
34427 spin_lock_irqsave(&phba->hbalock, flags);
34428- atomic_inc(&phba->num_rsrc_err);
34429+ atomic_inc_unchecked(&phba->num_rsrc_err);
34430 phba->last_rsrc_error_time = jiffies;
34431
34432 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34433@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34434 unsigned long flags;
34435 struct lpfc_hba *phba = vport->phba;
34436 uint32_t evt_posted;
34437- atomic_inc(&phba->num_cmd_success);
34438+ atomic_inc_unchecked(&phba->num_cmd_success);
34439
34440 if (vport->cfg_lun_queue_depth <= queue_depth)
34441 return;
34442@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34443 unsigned long num_rsrc_err, num_cmd_success;
34444 int i;
34445
34446- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34447- num_cmd_success = atomic_read(&phba->num_cmd_success);
34448+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34449+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34450
34451 vports = lpfc_create_vport_work_array(phba);
34452 if (vports != NULL)
34453@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34454 }
34455 }
34456 lpfc_destroy_vport_work_array(phba, vports);
34457- atomic_set(&phba->num_rsrc_err, 0);
34458- atomic_set(&phba->num_cmd_success, 0);
34459+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34460+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34461 }
34462
34463 /**
34464@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34465 }
34466 }
34467 lpfc_destroy_vport_work_array(phba, vports);
34468- atomic_set(&phba->num_rsrc_err, 0);
34469- atomic_set(&phba->num_cmd_success, 0);
34470+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34471+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34472 }
34473
34474 /**
34475diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34476index 5163edb..7b142bc 100644
34477--- a/drivers/scsi/pmcraid.c
34478+++ b/drivers/scsi/pmcraid.c
34479@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34480 res->scsi_dev = scsi_dev;
34481 scsi_dev->hostdata = res;
34482 res->change_detected = 0;
34483- atomic_set(&res->read_failures, 0);
34484- atomic_set(&res->write_failures, 0);
34485+ atomic_set_unchecked(&res->read_failures, 0);
34486+ atomic_set_unchecked(&res->write_failures, 0);
34487 rc = 0;
34488 }
34489 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34490@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34491
34492 /* If this was a SCSI read/write command keep count of errors */
34493 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34494- atomic_inc(&res->read_failures);
34495+ atomic_inc_unchecked(&res->read_failures);
34496 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34497- atomic_inc(&res->write_failures);
34498+ atomic_inc_unchecked(&res->write_failures);
34499
34500 if (!RES_IS_GSCSI(res->cfg_entry) &&
34501 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34502@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34503 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34504 * hrrq_id assigned here in queuecommand
34505 */
34506- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34507+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34508 pinstance->num_hrrq;
34509 cmd->cmd_done = pmcraid_io_done;
34510
34511@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34512 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34513 * hrrq_id assigned here in queuecommand
34514 */
34515- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34516+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34517 pinstance->num_hrrq;
34518
34519 if (request_size) {
34520@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34521
34522 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34523 /* add resources only after host is added into system */
34524- if (!atomic_read(&pinstance->expose_resources))
34525+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34526 return;
34527
34528 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34529@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34530 init_waitqueue_head(&pinstance->reset_wait_q);
34531
34532 atomic_set(&pinstance->outstanding_cmds, 0);
34533- atomic_set(&pinstance->last_message_id, 0);
34534- atomic_set(&pinstance->expose_resources, 0);
34535+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34536+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34537
34538 INIT_LIST_HEAD(&pinstance->free_res_q);
34539 INIT_LIST_HEAD(&pinstance->used_res_q);
34540@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34541 /* Schedule worker thread to handle CCN and take care of adding and
34542 * removing devices to OS
34543 */
34544- atomic_set(&pinstance->expose_resources, 1);
34545+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34546 schedule_work(&pinstance->worker_q);
34547 return rc;
34548
34549diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34550index ca496c7..9c791d5 100644
34551--- a/drivers/scsi/pmcraid.h
34552+++ b/drivers/scsi/pmcraid.h
34553@@ -748,7 +748,7 @@ struct pmcraid_instance {
34554 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34555
34556 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34557- atomic_t last_message_id;
34558+ atomic_unchecked_t last_message_id;
34559
34560 /* configuration table */
34561 struct pmcraid_config_table *cfg_table;
34562@@ -777,7 +777,7 @@ struct pmcraid_instance {
34563 atomic_t outstanding_cmds;
34564
34565 /* should add/delete resources to mid-layer now ?*/
34566- atomic_t expose_resources;
34567+ atomic_unchecked_t expose_resources;
34568
34569
34570
34571@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34572 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34573 };
34574 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34575- atomic_t read_failures; /* count of failed READ commands */
34576- atomic_t write_failures; /* count of failed WRITE commands */
34577+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34578+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34579
34580 /* To indicate add/delete/modify during CCN */
34581 u8 change_detected;
34582diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34583index fcf052c..a8025a4 100644
34584--- a/drivers/scsi/qla2xxx/qla_def.h
34585+++ b/drivers/scsi/qla2xxx/qla_def.h
34586@@ -2244,7 +2244,7 @@ struct isp_operations {
34587 int (*get_flash_version) (struct scsi_qla_host *, void *);
34588 int (*start_scsi) (srb_t *);
34589 int (*abort_isp) (struct scsi_qla_host *);
34590-};
34591+} __no_const;
34592
34593 /* MSI-X Support *************************************************************/
34594
34595diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34596index fd5edc6..4906148 100644
34597--- a/drivers/scsi/qla4xxx/ql4_def.h
34598+++ b/drivers/scsi/qla4xxx/ql4_def.h
34599@@ -258,7 +258,7 @@ struct ddb_entry {
34600 * (4000 only) */
34601 atomic_t relogin_timer; /* Max Time to wait for
34602 * relogin to complete */
34603- atomic_t relogin_retry_count; /* Num of times relogin has been
34604+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34605 * retried */
34606 uint32_t default_time2wait; /* Default Min time between
34607 * relogins (+aens) */
34608diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34609index 4169c8b..a8b896b 100644
34610--- a/drivers/scsi/qla4xxx/ql4_os.c
34611+++ b/drivers/scsi/qla4xxx/ql4_os.c
34612@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34613 */
34614 if (!iscsi_is_session_online(cls_sess)) {
34615 /* Reset retry relogin timer */
34616- atomic_inc(&ddb_entry->relogin_retry_count);
34617+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34618 DEBUG2(ql4_printk(KERN_INFO, ha,
34619 "%s: index[%d] relogin timed out-retrying"
34620 " relogin (%d), retry (%d)\n", __func__,
34621 ddb_entry->fw_ddb_index,
34622- atomic_read(&ddb_entry->relogin_retry_count),
34623+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34624 ddb_entry->default_time2wait + 4));
34625 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34626 atomic_set(&ddb_entry->retry_relogin_timer,
34627@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34628
34629 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34630 atomic_set(&ddb_entry->relogin_timer, 0);
34631- atomic_set(&ddb_entry->relogin_retry_count, 0);
34632+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34633
34634 ddb_entry->default_relogin_timeout =
34635 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34636diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34637index 2aeb2e9..46e3925 100644
34638--- a/drivers/scsi/scsi.c
34639+++ b/drivers/scsi/scsi.c
34640@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34641 unsigned long timeout;
34642 int rtn = 0;
34643
34644- atomic_inc(&cmd->device->iorequest_cnt);
34645+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34646
34647 /* check if the device is still usable */
34648 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34649diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34650index f85cfa6..a57c9e8 100644
34651--- a/drivers/scsi/scsi_lib.c
34652+++ b/drivers/scsi/scsi_lib.c
34653@@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34654 shost = sdev->host;
34655 scsi_init_cmd_errh(cmd);
34656 cmd->result = DID_NO_CONNECT << 16;
34657- atomic_inc(&cmd->device->iorequest_cnt);
34658+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34659
34660 /*
34661 * SCSI request completion path will do scsi_device_unbusy(),
34662@@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34663
34664 INIT_LIST_HEAD(&cmd->eh_entry);
34665
34666- atomic_inc(&cmd->device->iodone_cnt);
34667+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34668 if (cmd->result)
34669- atomic_inc(&cmd->device->ioerr_cnt);
34670+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34671
34672 disposition = scsi_decide_disposition(cmd);
34673 if (disposition != SUCCESS &&
34674diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34675index 04c2a27..9d8bd66 100644
34676--- a/drivers/scsi/scsi_sysfs.c
34677+++ b/drivers/scsi/scsi_sysfs.c
34678@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34679 char *buf) \
34680 { \
34681 struct scsi_device *sdev = to_scsi_device(dev); \
34682- unsigned long long count = atomic_read(&sdev->field); \
34683+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34684 return snprintf(buf, 20, "0x%llx\n", count); \
34685 } \
34686 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34687diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34688index 84a1fdf..693b0d6 100644
34689--- a/drivers/scsi/scsi_tgt_lib.c
34690+++ b/drivers/scsi/scsi_tgt_lib.c
34691@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34692 int err;
34693
34694 dprintk("%lx %u\n", uaddr, len);
34695- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34696+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34697 if (err) {
34698 /*
34699 * TODO: need to fixup sg_tablesize, max_segment_size,
34700diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34701index 1b21491..1b7f60e 100644
34702--- a/drivers/scsi/scsi_transport_fc.c
34703+++ b/drivers/scsi/scsi_transport_fc.c
34704@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34705 * Netlink Infrastructure
34706 */
34707
34708-static atomic_t fc_event_seq;
34709+static atomic_unchecked_t fc_event_seq;
34710
34711 /**
34712 * fc_get_event_number - Obtain the next sequential FC event number
34713@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34714 u32
34715 fc_get_event_number(void)
34716 {
34717- return atomic_add_return(1, &fc_event_seq);
34718+ return atomic_add_return_unchecked(1, &fc_event_seq);
34719 }
34720 EXPORT_SYMBOL(fc_get_event_number);
34721
34722@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34723 {
34724 int error;
34725
34726- atomic_set(&fc_event_seq, 0);
34727+ atomic_set_unchecked(&fc_event_seq, 0);
34728
34729 error = transport_class_register(&fc_host_class);
34730 if (error)
34731@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34732 char *cp;
34733
34734 *val = simple_strtoul(buf, &cp, 0);
34735- if ((*cp && (*cp != '\n')) || (*val < 0))
34736+ if (*cp && (*cp != '\n'))
34737 return -EINVAL;
34738 /*
34739 * Check for overflow; dev_loss_tmo is u32
34740diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34741index 96029e6..4d77fa0 100644
34742--- a/drivers/scsi/scsi_transport_iscsi.c
34743+++ b/drivers/scsi/scsi_transport_iscsi.c
34744@@ -79,7 +79,7 @@ struct iscsi_internal {
34745 struct transport_container session_cont;
34746 };
34747
34748-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34749+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34750 static struct workqueue_struct *iscsi_eh_timer_workq;
34751
34752 static DEFINE_IDA(iscsi_sess_ida);
34753@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34754 int err;
34755
34756 ihost = shost->shost_data;
34757- session->sid = atomic_add_return(1, &iscsi_session_nr);
34758+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34759
34760 if (target_id == ISCSI_MAX_TARGET) {
34761 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34762@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34763 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34764 ISCSI_TRANSPORT_VERSION);
34765
34766- atomic_set(&iscsi_session_nr, 0);
34767+ atomic_set_unchecked(&iscsi_session_nr, 0);
34768
34769 err = class_register(&iscsi_transport_class);
34770 if (err)
34771diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34772index 21a045e..ec89e03 100644
34773--- a/drivers/scsi/scsi_transport_srp.c
34774+++ b/drivers/scsi/scsi_transport_srp.c
34775@@ -33,7 +33,7 @@
34776 #include "scsi_transport_srp_internal.h"
34777
34778 struct srp_host_attrs {
34779- atomic_t next_port_id;
34780+ atomic_unchecked_t next_port_id;
34781 };
34782 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34783
34784@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34785 struct Scsi_Host *shost = dev_to_shost(dev);
34786 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34787
34788- atomic_set(&srp_host->next_port_id, 0);
34789+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34790 return 0;
34791 }
34792
34793@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34794 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34795 rport->roles = ids->roles;
34796
34797- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34798+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34799 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34800
34801 transport_setup_device(&rport->dev);
34802diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34803index 441a1c5..07cece7 100644
34804--- a/drivers/scsi/sg.c
34805+++ b/drivers/scsi/sg.c
34806@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34807 sdp->disk->disk_name,
34808 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34809 NULL,
34810- (char *)arg);
34811+ (char __user *)arg);
34812 case BLKTRACESTART:
34813 return blk_trace_startstop(sdp->device->request_queue, 1);
34814 case BLKTRACESTOP:
34815@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34816 const struct file_operations * fops;
34817 };
34818
34819-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34820+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34821 {"allow_dio", &adio_fops},
34822 {"debug", &debug_fops},
34823 {"def_reserved_size", &dressz_fops},
34824@@ -2327,7 +2327,7 @@ sg_proc_init(void)
34825 {
34826 int k, mask;
34827 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34828- struct sg_proc_leaf * leaf;
34829+ const struct sg_proc_leaf * leaf;
34830
34831 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34832 if (!sg_proc_sgp)
34833diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34834index f64250e..1ee3049 100644
34835--- a/drivers/spi/spi-dw-pci.c
34836+++ b/drivers/spi/spi-dw-pci.c
34837@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34838 #define spi_resume NULL
34839 #endif
34840
34841-static const struct pci_device_id pci_ids[] __devinitdata = {
34842+static const struct pci_device_id pci_ids[] __devinitconst = {
34843 /* Intel MID platform SPI controller 0 */
34844 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34845 {},
34846diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34847index 77eae99..b7cdcc9 100644
34848--- a/drivers/spi/spi.c
34849+++ b/drivers/spi/spi.c
34850@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34851 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34852
34853 /* portable code must never pass more than 32 bytes */
34854-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34855+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34856
34857 static u8 *buf;
34858
34859diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34860index 436fe97..4082570 100644
34861--- a/drivers/staging/gma500/power.c
34862+++ b/drivers/staging/gma500/power.c
34863@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34864 ret = gma_resume_pci(dev->pdev);
34865 if (ret == 0) {
34866 /* FIXME: we want to defer this for Medfield/Oaktrail */
34867- gma_resume_display(dev);
34868+ gma_resume_display(dev->pdev);
34869 psb_irq_preinstall(dev);
34870 psb_irq_postinstall(dev);
34871 pm_runtime_get(&dev->pdev->dev);
34872diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34873index bafccb3..e3ac78d 100644
34874--- a/drivers/staging/hv/rndis_filter.c
34875+++ b/drivers/staging/hv/rndis_filter.c
34876@@ -42,7 +42,7 @@ struct rndis_device {
34877
34878 enum rndis_device_state state;
34879 bool link_state;
34880- atomic_t new_req_id;
34881+ atomic_unchecked_t new_req_id;
34882
34883 spinlock_t request_lock;
34884 struct list_head req_list;
34885@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34886 * template
34887 */
34888 set = &rndis_msg->msg.set_req;
34889- set->req_id = atomic_inc_return(&dev->new_req_id);
34890+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34891
34892 /* Add to the request list */
34893 spin_lock_irqsave(&dev->request_lock, flags);
34894@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34895
34896 /* Setup the rndis set */
34897 halt = &request->request_msg.msg.halt_req;
34898- halt->req_id = atomic_inc_return(&dev->new_req_id);
34899+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34900
34901 /* Ignore return since this msg is optional. */
34902 rndis_filter_send_request(dev, request);
34903diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
34904index 9e8f010..af9efb5 100644
34905--- a/drivers/staging/iio/buffer_generic.h
34906+++ b/drivers/staging/iio/buffer_generic.h
34907@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
34908
34909 int (*is_enabled)(struct iio_buffer *buffer);
34910 int (*enable)(struct iio_buffer *buffer);
34911-};
34912+} __no_const;
34913
34914 /**
34915 * struct iio_buffer_setup_ops - buffer setup related callbacks
34916diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
34917index 8b307b4..a97ac91 100644
34918--- a/drivers/staging/octeon/ethernet-rx.c
34919+++ b/drivers/staging/octeon/ethernet-rx.c
34920@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34921 /* Increment RX stats for virtual ports */
34922 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34923 #ifdef CONFIG_64BIT
34924- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34925- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34926+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34927+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34928 #else
34929- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34930- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34931+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34932+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34933 #endif
34934 }
34935 netif_receive_skb(skb);
34936@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34937 dev->name);
34938 */
34939 #ifdef CONFIG_64BIT
34940- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34941+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34942 #else
34943- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34944+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34945 #endif
34946 dev_kfree_skb_irq(skb);
34947 }
34948diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
34949index 076f866..2308070 100644
34950--- a/drivers/staging/octeon/ethernet.c
34951+++ b/drivers/staging/octeon/ethernet.c
34952@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
34953 * since the RX tasklet also increments it.
34954 */
34955 #ifdef CONFIG_64BIT
34956- atomic64_add(rx_status.dropped_packets,
34957- (atomic64_t *)&priv->stats.rx_dropped);
34958+ atomic64_add_unchecked(rx_status.dropped_packets,
34959+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34960 #else
34961- atomic_add(rx_status.dropped_packets,
34962- (atomic_t *)&priv->stats.rx_dropped);
34963+ atomic_add_unchecked(rx_status.dropped_packets,
34964+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34965 #endif
34966 }
34967
34968diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
34969index 7a19555..466456d 100644
34970--- a/drivers/staging/pohmelfs/inode.c
34971+++ b/drivers/staging/pohmelfs/inode.c
34972@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
34973 mutex_init(&psb->mcache_lock);
34974 psb->mcache_root = RB_ROOT;
34975 psb->mcache_timeout = msecs_to_jiffies(5000);
34976- atomic_long_set(&psb->mcache_gen, 0);
34977+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
34978
34979 psb->trans_max_pages = 100;
34980
34981@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
34982 INIT_LIST_HEAD(&psb->crypto_ready_list);
34983 INIT_LIST_HEAD(&psb->crypto_active_list);
34984
34985- atomic_set(&psb->trans_gen, 1);
34986+ atomic_set_unchecked(&psb->trans_gen, 1);
34987 atomic_long_set(&psb->total_inodes, 0);
34988
34989 mutex_init(&psb->state_lock);
34990diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
34991index e22665c..a2a9390 100644
34992--- a/drivers/staging/pohmelfs/mcache.c
34993+++ b/drivers/staging/pohmelfs/mcache.c
34994@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
34995 m->data = data;
34996 m->start = start;
34997 m->size = size;
34998- m->gen = atomic_long_inc_return(&psb->mcache_gen);
34999+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35000
35001 mutex_lock(&psb->mcache_lock);
35002 err = pohmelfs_mcache_insert(psb, m);
35003diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35004index 985b6b7..7699e05 100644
35005--- a/drivers/staging/pohmelfs/netfs.h
35006+++ b/drivers/staging/pohmelfs/netfs.h
35007@@ -571,14 +571,14 @@ struct pohmelfs_config;
35008 struct pohmelfs_sb {
35009 struct rb_root mcache_root;
35010 struct mutex mcache_lock;
35011- atomic_long_t mcache_gen;
35012+ atomic_long_unchecked_t mcache_gen;
35013 unsigned long mcache_timeout;
35014
35015 unsigned int idx;
35016
35017 unsigned int trans_retries;
35018
35019- atomic_t trans_gen;
35020+ atomic_unchecked_t trans_gen;
35021
35022 unsigned int crypto_attached_size;
35023 unsigned int crypto_align_size;
35024diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35025index 06c1a74..866eebc 100644
35026--- a/drivers/staging/pohmelfs/trans.c
35027+++ b/drivers/staging/pohmelfs/trans.c
35028@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35029 int err;
35030 struct netfs_cmd *cmd = t->iovec.iov_base;
35031
35032- t->gen = atomic_inc_return(&psb->trans_gen);
35033+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35034
35035 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35036 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35037diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35038index 86308a0..feaa925 100644
35039--- a/drivers/staging/rtl8712/rtl871x_io.h
35040+++ b/drivers/staging/rtl8712/rtl871x_io.h
35041@@ -108,7 +108,7 @@ struct _io_ops {
35042 u8 *pmem);
35043 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35044 u8 *pmem);
35045-};
35046+} __no_const;
35047
35048 struct io_req {
35049 struct list_head list;
35050diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35051index c7b5e8b..783d6cb 100644
35052--- a/drivers/staging/sbe-2t3e3/netdev.c
35053+++ b/drivers/staging/sbe-2t3e3/netdev.c
35054@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35055 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35056
35057 if (rlen)
35058- if (copy_to_user(data, &resp, rlen))
35059+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35060 return -EFAULT;
35061
35062 return 0;
35063diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35064index be21617..0954e45 100644
35065--- a/drivers/staging/usbip/usbip_common.h
35066+++ b/drivers/staging/usbip/usbip_common.h
35067@@ -289,7 +289,7 @@ struct usbip_device {
35068 void (*shutdown)(struct usbip_device *);
35069 void (*reset)(struct usbip_device *);
35070 void (*unusable)(struct usbip_device *);
35071- } eh_ops;
35072+ } __no_const eh_ops;
35073 };
35074
35075 #if 0
35076diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35077index 88b3298..3783eee 100644
35078--- a/drivers/staging/usbip/vhci.h
35079+++ b/drivers/staging/usbip/vhci.h
35080@@ -88,7 +88,7 @@ struct vhci_hcd {
35081 unsigned resuming:1;
35082 unsigned long re_timeout;
35083
35084- atomic_t seqnum;
35085+ atomic_unchecked_t seqnum;
35086
35087 /*
35088 * NOTE:
35089diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35090index 2ee97e2..0420b86 100644
35091--- a/drivers/staging/usbip/vhci_hcd.c
35092+++ b/drivers/staging/usbip/vhci_hcd.c
35093@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35094 return;
35095 }
35096
35097- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35098+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35099 if (priv->seqnum == 0xffff)
35100 dev_info(&urb->dev->dev, "seqnum max\n");
35101
35102@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35103 return -ENOMEM;
35104 }
35105
35106- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35107+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35108 if (unlink->seqnum == 0xffff)
35109 pr_info("seqnum max\n");
35110
35111@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35112 vdev->rhport = rhport;
35113 }
35114
35115- atomic_set(&vhci->seqnum, 0);
35116+ atomic_set_unchecked(&vhci->seqnum, 0);
35117 spin_lock_init(&vhci->lock);
35118
35119 hcd->power_budget = 0; /* no limit */
35120diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35121index 3872b8c..fe6d2f4 100644
35122--- a/drivers/staging/usbip/vhci_rx.c
35123+++ b/drivers/staging/usbip/vhci_rx.c
35124@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35125 if (!urb) {
35126 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35127 pr_info("max seqnum %d\n",
35128- atomic_read(&the_controller->seqnum));
35129+ atomic_read_unchecked(&the_controller->seqnum));
35130 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35131 return;
35132 }
35133diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35134index 7735027..30eed13 100644
35135--- a/drivers/staging/vt6655/hostap.c
35136+++ b/drivers/staging/vt6655/hostap.c
35137@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35138 *
35139 */
35140
35141+static net_device_ops_no_const apdev_netdev_ops;
35142+
35143 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35144 {
35145 PSDevice apdev_priv;
35146 struct net_device *dev = pDevice->dev;
35147 int ret;
35148- const struct net_device_ops apdev_netdev_ops = {
35149- .ndo_start_xmit = pDevice->tx_80211,
35150- };
35151
35152 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35153
35154@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35155 *apdev_priv = *pDevice;
35156 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35157
35158+ /* only half broken now */
35159+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35160 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35161
35162 pDevice->apdev->type = ARPHRD_IEEE80211;
35163diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35164index 51b5adf..098e320 100644
35165--- a/drivers/staging/vt6656/hostap.c
35166+++ b/drivers/staging/vt6656/hostap.c
35167@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35168 *
35169 */
35170
35171+static net_device_ops_no_const apdev_netdev_ops;
35172+
35173 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35174 {
35175 PSDevice apdev_priv;
35176 struct net_device *dev = pDevice->dev;
35177 int ret;
35178- const struct net_device_ops apdev_netdev_ops = {
35179- .ndo_start_xmit = pDevice->tx_80211,
35180- };
35181
35182 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35183
35184@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35185 *apdev_priv = *pDevice;
35186 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35187
35188+ /* only half broken now */
35189+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35190 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35191
35192 pDevice->apdev->type = ARPHRD_IEEE80211;
35193diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35194index 7843dfd..3db105f 100644
35195--- a/drivers/staging/wlan-ng/hfa384x_usb.c
35196+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35197@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35198
35199 struct usbctlx_completor {
35200 int (*complete) (struct usbctlx_completor *);
35201-};
35202+} __no_const;
35203
35204 static int
35205 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35206diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35207index 1ca66ea..76f1343 100644
35208--- a/drivers/staging/zcache/tmem.c
35209+++ b/drivers/staging/zcache/tmem.c
35210@@ -39,7 +39,7 @@
35211 * A tmem host implementation must use this function to register callbacks
35212 * for memory allocation.
35213 */
35214-static struct tmem_hostops tmem_hostops;
35215+static tmem_hostops_no_const tmem_hostops;
35216
35217 static void tmem_objnode_tree_init(void);
35218
35219@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35220 * A tmem host implementation must use this function to register
35221 * callbacks for a page-accessible memory (PAM) implementation
35222 */
35223-static struct tmem_pamops tmem_pamops;
35224+static tmem_pamops_no_const tmem_pamops;
35225
35226 void tmem_register_pamops(struct tmem_pamops *m)
35227 {
35228diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35229index ed147c4..94fc3c6 100644
35230--- a/drivers/staging/zcache/tmem.h
35231+++ b/drivers/staging/zcache/tmem.h
35232@@ -180,6 +180,7 @@ struct tmem_pamops {
35233 void (*new_obj)(struct tmem_obj *);
35234 int (*replace_in_obj)(void *, struct tmem_obj *);
35235 };
35236+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35237 extern void tmem_register_pamops(struct tmem_pamops *m);
35238
35239 /* memory allocation methods provided by the host implementation */
35240@@ -189,6 +190,7 @@ struct tmem_hostops {
35241 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35242 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35243 };
35244+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35245 extern void tmem_register_hostops(struct tmem_hostops *m);
35246
35247 /* core tmem accessor functions */
35248diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35249index 8599545..7761358 100644
35250--- a/drivers/target/iscsi/iscsi_target.c
35251+++ b/drivers/target/iscsi/iscsi_target.c
35252@@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35253 * outstanding_r2ts reaches zero, go ahead and send the delayed
35254 * TASK_ABORTED status.
35255 */
35256- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35257+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35258 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35259 if (--cmd->outstanding_r2ts < 1) {
35260 iscsit_stop_dataout_timer(cmd);
35261diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35262index 6845228..df77141 100644
35263--- a/drivers/target/target_core_tmr.c
35264+++ b/drivers/target/target_core_tmr.c
35265@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35266 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35267 cmd->t_task_list_num,
35268 atomic_read(&cmd->t_task_cdbs_left),
35269- atomic_read(&cmd->t_task_cdbs_sent),
35270+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35271 atomic_read(&cmd->t_transport_active),
35272 atomic_read(&cmd->t_transport_stop),
35273 atomic_read(&cmd->t_transport_sent));
35274@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35275 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35276 " task: %p, t_fe_count: %d dev: %p\n", task,
35277 fe_count, dev);
35278- atomic_set(&cmd->t_transport_aborted, 1);
35279+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35280 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35281
35282 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35283@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35284 }
35285 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35286 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35287- atomic_set(&cmd->t_transport_aborted, 1);
35288+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35289 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35290
35291 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35292diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35293index e87d0eb..856cbcc 100644
35294--- a/drivers/target/target_core_transport.c
35295+++ b/drivers/target/target_core_transport.c
35296@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35297
35298 dev->queue_depth = dev_limits->queue_depth;
35299 atomic_set(&dev->depth_left, dev->queue_depth);
35300- atomic_set(&dev->dev_ordered_id, 0);
35301+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35302
35303 se_dev_set_default_attribs(dev, dev_limits);
35304
35305@@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35306 * Used to determine when ORDERED commands should go from
35307 * Dormant to Active status.
35308 */
35309- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35310+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35311 smp_mb__after_atomic_inc();
35312 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35313 cmd->se_ordered_id, cmd->sam_task_attr,
35314@@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35315 " t_transport_active: %d t_transport_stop: %d"
35316 " t_transport_sent: %d\n", cmd->t_task_list_num,
35317 atomic_read(&cmd->t_task_cdbs_left),
35318- atomic_read(&cmd->t_task_cdbs_sent),
35319+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35320 atomic_read(&cmd->t_task_cdbs_ex_left),
35321 atomic_read(&cmd->t_transport_active),
35322 atomic_read(&cmd->t_transport_stop),
35323@@ -2089,9 +2089,9 @@ check_depth:
35324
35325 spin_lock_irqsave(&cmd->t_state_lock, flags);
35326 task->task_flags |= (TF_ACTIVE | TF_SENT);
35327- atomic_inc(&cmd->t_task_cdbs_sent);
35328+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35329
35330- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35331+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35332 cmd->t_task_list_num)
35333 atomic_set(&cmd->t_transport_sent, 1);
35334
35335@@ -4260,7 +4260,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35336 atomic_set(&cmd->transport_lun_stop, 0);
35337 }
35338 if (!atomic_read(&cmd->t_transport_active) ||
35339- atomic_read(&cmd->t_transport_aborted)) {
35340+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
35341 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35342 return false;
35343 }
35344@@ -4509,7 +4509,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35345 {
35346 int ret = 0;
35347
35348- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35349+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35350 if (!send_status ||
35351 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35352 return 1;
35353@@ -4546,7 +4546,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35354 */
35355 if (cmd->data_direction == DMA_TO_DEVICE) {
35356 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35357- atomic_inc(&cmd->t_transport_aborted);
35358+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35359 smp_mb__after_atomic_inc();
35360 }
35361 }
35362diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35363index b9040be..e3f5aab 100644
35364--- a/drivers/tty/hvc/hvcs.c
35365+++ b/drivers/tty/hvc/hvcs.c
35366@@ -83,6 +83,7 @@
35367 #include <asm/hvcserver.h>
35368 #include <asm/uaccess.h>
35369 #include <asm/vio.h>
35370+#include <asm/local.h>
35371
35372 /*
35373 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35374@@ -270,7 +271,7 @@ struct hvcs_struct {
35375 unsigned int index;
35376
35377 struct tty_struct *tty;
35378- int open_count;
35379+ local_t open_count;
35380
35381 /*
35382 * Used to tell the driver kernel_thread what operations need to take
35383@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35384
35385 spin_lock_irqsave(&hvcsd->lock, flags);
35386
35387- if (hvcsd->open_count > 0) {
35388+ if (local_read(&hvcsd->open_count) > 0) {
35389 spin_unlock_irqrestore(&hvcsd->lock, flags);
35390 printk(KERN_INFO "HVCS: vterm state unchanged. "
35391 "The hvcs device node is still in use.\n");
35392@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35393 if ((retval = hvcs_partner_connect(hvcsd)))
35394 goto error_release;
35395
35396- hvcsd->open_count = 1;
35397+ local_set(&hvcsd->open_count, 1);
35398 hvcsd->tty = tty;
35399 tty->driver_data = hvcsd;
35400
35401@@ -1179,7 +1180,7 @@ fast_open:
35402
35403 spin_lock_irqsave(&hvcsd->lock, flags);
35404 kref_get(&hvcsd->kref);
35405- hvcsd->open_count++;
35406+ local_inc(&hvcsd->open_count);
35407 hvcsd->todo_mask |= HVCS_SCHED_READ;
35408 spin_unlock_irqrestore(&hvcsd->lock, flags);
35409
35410@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35411 hvcsd = tty->driver_data;
35412
35413 spin_lock_irqsave(&hvcsd->lock, flags);
35414- if (--hvcsd->open_count == 0) {
35415+ if (local_dec_and_test(&hvcsd->open_count)) {
35416
35417 vio_disable_interrupts(hvcsd->vdev);
35418
35419@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35420 free_irq(irq, hvcsd);
35421 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35422 return;
35423- } else if (hvcsd->open_count < 0) {
35424+ } else if (local_read(&hvcsd->open_count) < 0) {
35425 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35426 " is missmanaged.\n",
35427- hvcsd->vdev->unit_address, hvcsd->open_count);
35428+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35429 }
35430
35431 spin_unlock_irqrestore(&hvcsd->lock, flags);
35432@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35433
35434 spin_lock_irqsave(&hvcsd->lock, flags);
35435 /* Preserve this so that we know how many kref refs to put */
35436- temp_open_count = hvcsd->open_count;
35437+ temp_open_count = local_read(&hvcsd->open_count);
35438
35439 /*
35440 * Don't kref put inside the spinlock because the destruction
35441@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35442 hvcsd->tty->driver_data = NULL;
35443 hvcsd->tty = NULL;
35444
35445- hvcsd->open_count = 0;
35446+ local_set(&hvcsd->open_count, 0);
35447
35448 /* This will drop any buffered data on the floor which is OK in a hangup
35449 * scenario. */
35450@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35451 * the middle of a write operation? This is a crummy place to do this
35452 * but we want to keep it all in the spinlock.
35453 */
35454- if (hvcsd->open_count <= 0) {
35455+ if (local_read(&hvcsd->open_count) <= 0) {
35456 spin_unlock_irqrestore(&hvcsd->lock, flags);
35457 return -ENODEV;
35458 }
35459@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35460 {
35461 struct hvcs_struct *hvcsd = tty->driver_data;
35462
35463- if (!hvcsd || hvcsd->open_count <= 0)
35464+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35465 return 0;
35466
35467 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35468diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35469index ef92869..f4ebd88 100644
35470--- a/drivers/tty/ipwireless/tty.c
35471+++ b/drivers/tty/ipwireless/tty.c
35472@@ -29,6 +29,7 @@
35473 #include <linux/tty_driver.h>
35474 #include <linux/tty_flip.h>
35475 #include <linux/uaccess.h>
35476+#include <asm/local.h>
35477
35478 #include "tty.h"
35479 #include "network.h"
35480@@ -51,7 +52,7 @@ struct ipw_tty {
35481 int tty_type;
35482 struct ipw_network *network;
35483 struct tty_struct *linux_tty;
35484- int open_count;
35485+ local_t open_count;
35486 unsigned int control_lines;
35487 struct mutex ipw_tty_mutex;
35488 int tx_bytes_queued;
35489@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35490 mutex_unlock(&tty->ipw_tty_mutex);
35491 return -ENODEV;
35492 }
35493- if (tty->open_count == 0)
35494+ if (local_read(&tty->open_count) == 0)
35495 tty->tx_bytes_queued = 0;
35496
35497- tty->open_count++;
35498+ local_inc(&tty->open_count);
35499
35500 tty->linux_tty = linux_tty;
35501 linux_tty->driver_data = tty;
35502@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35503
35504 static void do_ipw_close(struct ipw_tty *tty)
35505 {
35506- tty->open_count--;
35507-
35508- if (tty->open_count == 0) {
35509+ if (local_dec_return(&tty->open_count) == 0) {
35510 struct tty_struct *linux_tty = tty->linux_tty;
35511
35512 if (linux_tty != NULL) {
35513@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35514 return;
35515
35516 mutex_lock(&tty->ipw_tty_mutex);
35517- if (tty->open_count == 0) {
35518+ if (local_read(&tty->open_count) == 0) {
35519 mutex_unlock(&tty->ipw_tty_mutex);
35520 return;
35521 }
35522@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35523 return;
35524 }
35525
35526- if (!tty->open_count) {
35527+ if (!local_read(&tty->open_count)) {
35528 mutex_unlock(&tty->ipw_tty_mutex);
35529 return;
35530 }
35531@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35532 return -ENODEV;
35533
35534 mutex_lock(&tty->ipw_tty_mutex);
35535- if (!tty->open_count) {
35536+ if (!local_read(&tty->open_count)) {
35537 mutex_unlock(&tty->ipw_tty_mutex);
35538 return -EINVAL;
35539 }
35540@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35541 if (!tty)
35542 return -ENODEV;
35543
35544- if (!tty->open_count)
35545+ if (!local_read(&tty->open_count))
35546 return -EINVAL;
35547
35548 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35549@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35550 if (!tty)
35551 return 0;
35552
35553- if (!tty->open_count)
35554+ if (!local_read(&tty->open_count))
35555 return 0;
35556
35557 return tty->tx_bytes_queued;
35558@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35559 if (!tty)
35560 return -ENODEV;
35561
35562- if (!tty->open_count)
35563+ if (!local_read(&tty->open_count))
35564 return -EINVAL;
35565
35566 return get_control_lines(tty);
35567@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35568 if (!tty)
35569 return -ENODEV;
35570
35571- if (!tty->open_count)
35572+ if (!local_read(&tty->open_count))
35573 return -EINVAL;
35574
35575 return set_control_lines(tty, set, clear);
35576@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35577 if (!tty)
35578 return -ENODEV;
35579
35580- if (!tty->open_count)
35581+ if (!local_read(&tty->open_count))
35582 return -EINVAL;
35583
35584 /* FIXME: Exactly how is the tty object locked here .. */
35585@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35586 against a parallel ioctl etc */
35587 mutex_lock(&ttyj->ipw_tty_mutex);
35588 }
35589- while (ttyj->open_count)
35590+ while (local_read(&ttyj->open_count))
35591 do_ipw_close(ttyj);
35592 ipwireless_disassociate_network_ttys(network,
35593 ttyj->channel_idx);
35594diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35595index fc7bbba..9527e93 100644
35596--- a/drivers/tty/n_gsm.c
35597+++ b/drivers/tty/n_gsm.c
35598@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35599 kref_init(&dlci->ref);
35600 mutex_init(&dlci->mutex);
35601 dlci->fifo = &dlci->_fifo;
35602- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35603+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35604 kfree(dlci);
35605 return NULL;
35606 }
35607diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35608index 39d6ab6..eb97f41 100644
35609--- a/drivers/tty/n_tty.c
35610+++ b/drivers/tty/n_tty.c
35611@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35612 {
35613 *ops = tty_ldisc_N_TTY;
35614 ops->owner = NULL;
35615- ops->refcount = ops->flags = 0;
35616+ atomic_set(&ops->refcount, 0);
35617+ ops->flags = 0;
35618 }
35619 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35620diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35621index e18604b..a7d5a11 100644
35622--- a/drivers/tty/pty.c
35623+++ b/drivers/tty/pty.c
35624@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35625 register_sysctl_table(pty_root_table);
35626
35627 /* Now create the /dev/ptmx special device */
35628+ pax_open_kernel();
35629 tty_default_fops(&ptmx_fops);
35630- ptmx_fops.open = ptmx_open;
35631+ *(void **)&ptmx_fops.open = ptmx_open;
35632+ pax_close_kernel();
35633
35634 cdev_init(&ptmx_cdev, &ptmx_fops);
35635 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35636diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35637index 2b42a01..32a2ed3 100644
35638--- a/drivers/tty/serial/kgdboc.c
35639+++ b/drivers/tty/serial/kgdboc.c
35640@@ -24,8 +24,9 @@
35641 #define MAX_CONFIG_LEN 40
35642
35643 static struct kgdb_io kgdboc_io_ops;
35644+static struct kgdb_io kgdboc_io_ops_console;
35645
35646-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35647+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35648 static int configured = -1;
35649
35650 static char config[MAX_CONFIG_LEN];
35651@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35652 kgdboc_unregister_kbd();
35653 if (configured == 1)
35654 kgdb_unregister_io_module(&kgdboc_io_ops);
35655+ else if (configured == 2)
35656+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35657 }
35658
35659 static int configure_kgdboc(void)
35660@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35661 int err;
35662 char *cptr = config;
35663 struct console *cons;
35664+ int is_console = 0;
35665
35666 err = kgdboc_option_setup(config);
35667 if (err || !strlen(config) || isspace(config[0]))
35668 goto noconfig;
35669
35670 err = -ENODEV;
35671- kgdboc_io_ops.is_console = 0;
35672 kgdb_tty_driver = NULL;
35673
35674 kgdboc_use_kms = 0;
35675@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35676 int idx;
35677 if (cons->device && cons->device(cons, &idx) == p &&
35678 idx == tty_line) {
35679- kgdboc_io_ops.is_console = 1;
35680+ is_console = 1;
35681 break;
35682 }
35683 cons = cons->next;
35684@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35685 kgdb_tty_line = tty_line;
35686
35687 do_register:
35688- err = kgdb_register_io_module(&kgdboc_io_ops);
35689+ if (is_console) {
35690+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35691+ configured = 2;
35692+ } else {
35693+ err = kgdb_register_io_module(&kgdboc_io_ops);
35694+ configured = 1;
35695+ }
35696 if (err)
35697 goto noconfig;
35698
35699- configured = 1;
35700-
35701 return 0;
35702
35703 noconfig:
35704@@ -213,7 +220,7 @@ noconfig:
35705 static int __init init_kgdboc(void)
35706 {
35707 /* Already configured? */
35708- if (configured == 1)
35709+ if (configured >= 1)
35710 return 0;
35711
35712 return configure_kgdboc();
35713@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35714 if (config[len - 1] == '\n')
35715 config[len - 1] = '\0';
35716
35717- if (configured == 1)
35718+ if (configured >= 1)
35719 cleanup_kgdboc();
35720
35721 /* Go and configure with the new params. */
35722@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35723 .post_exception = kgdboc_post_exp_handler,
35724 };
35725
35726+static struct kgdb_io kgdboc_io_ops_console = {
35727+ .name = "kgdboc",
35728+ .read_char = kgdboc_get_char,
35729+ .write_char = kgdboc_put_char,
35730+ .pre_exception = kgdboc_pre_exp_handler,
35731+ .post_exception = kgdboc_post_exp_handler,
35732+ .is_console = 1
35733+};
35734+
35735 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35736 /* This is only available if kgdboc is a built in for early debugging */
35737 static int __init kgdboc_early_init(char *opt)
35738diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35739index 05085be..67eadb0 100644
35740--- a/drivers/tty/tty_io.c
35741+++ b/drivers/tty/tty_io.c
35742@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35743
35744 void tty_default_fops(struct file_operations *fops)
35745 {
35746- *fops = tty_fops;
35747+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35748 }
35749
35750 /*
35751diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35752index 8e0924f..4204eb4 100644
35753--- a/drivers/tty/tty_ldisc.c
35754+++ b/drivers/tty/tty_ldisc.c
35755@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35756 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35757 struct tty_ldisc_ops *ldo = ld->ops;
35758
35759- ldo->refcount--;
35760+ atomic_dec(&ldo->refcount);
35761 module_put(ldo->owner);
35762 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35763
35764@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35765 spin_lock_irqsave(&tty_ldisc_lock, flags);
35766 tty_ldiscs[disc] = new_ldisc;
35767 new_ldisc->num = disc;
35768- new_ldisc->refcount = 0;
35769+ atomic_set(&new_ldisc->refcount, 0);
35770 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35771
35772 return ret;
35773@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35774 return -EINVAL;
35775
35776 spin_lock_irqsave(&tty_ldisc_lock, flags);
35777- if (tty_ldiscs[disc]->refcount)
35778+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35779 ret = -EBUSY;
35780 else
35781 tty_ldiscs[disc] = NULL;
35782@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35783 if (ldops) {
35784 ret = ERR_PTR(-EAGAIN);
35785 if (try_module_get(ldops->owner)) {
35786- ldops->refcount++;
35787+ atomic_inc(&ldops->refcount);
35788 ret = ldops;
35789 }
35790 }
35791@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35792 unsigned long flags;
35793
35794 spin_lock_irqsave(&tty_ldisc_lock, flags);
35795- ldops->refcount--;
35796+ atomic_dec(&ldops->refcount);
35797 module_put(ldops->owner);
35798 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35799 }
35800diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35801index a605549..6bd3c96 100644
35802--- a/drivers/tty/vt/keyboard.c
35803+++ b/drivers/tty/vt/keyboard.c
35804@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35805 kbd->kbdmode == VC_OFF) &&
35806 value != KVAL(K_SAK))
35807 return; /* SAK is allowed even in raw mode */
35808+
35809+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35810+ {
35811+ void *func = fn_handler[value];
35812+ if (func == fn_show_state || func == fn_show_ptregs ||
35813+ func == fn_show_mem)
35814+ return;
35815+ }
35816+#endif
35817+
35818 fn_handler[value](vc);
35819 }
35820
35821diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35822index 5e096f4..0da1363 100644
35823--- a/drivers/tty/vt/vt_ioctl.c
35824+++ b/drivers/tty/vt/vt_ioctl.c
35825@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35826 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35827 return -EFAULT;
35828
35829- if (!capable(CAP_SYS_TTY_CONFIG))
35830- perm = 0;
35831-
35832 switch (cmd) {
35833 case KDGKBENT:
35834 key_map = key_maps[s];
35835@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35836 val = (i ? K_HOLE : K_NOSUCHMAP);
35837 return put_user(val, &user_kbe->kb_value);
35838 case KDSKBENT:
35839+ if (!capable(CAP_SYS_TTY_CONFIG))
35840+ perm = 0;
35841+
35842 if (!perm)
35843 return -EPERM;
35844 if (!i && v == K_NOSUCHMAP) {
35845@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35846 int i, j, k;
35847 int ret;
35848
35849- if (!capable(CAP_SYS_TTY_CONFIG))
35850- perm = 0;
35851-
35852 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35853 if (!kbs) {
35854 ret = -ENOMEM;
35855@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35856 kfree(kbs);
35857 return ((p && *p) ? -EOVERFLOW : 0);
35858 case KDSKBSENT:
35859+ if (!capable(CAP_SYS_TTY_CONFIG))
35860+ perm = 0;
35861+
35862 if (!perm) {
35863 ret = -EPERM;
35864 goto reterr;
35865diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35866index a783d53..cb30d94 100644
35867--- a/drivers/uio/uio.c
35868+++ b/drivers/uio/uio.c
35869@@ -25,6 +25,7 @@
35870 #include <linux/kobject.h>
35871 #include <linux/cdev.h>
35872 #include <linux/uio_driver.h>
35873+#include <asm/local.h>
35874
35875 #define UIO_MAX_DEVICES (1U << MINORBITS)
35876
35877@@ -32,10 +33,10 @@ struct uio_device {
35878 struct module *owner;
35879 struct device *dev;
35880 int minor;
35881- atomic_t event;
35882+ atomic_unchecked_t event;
35883 struct fasync_struct *async_queue;
35884 wait_queue_head_t wait;
35885- int vma_count;
35886+ local_t vma_count;
35887 struct uio_info *info;
35888 struct kobject *map_dir;
35889 struct kobject *portio_dir;
35890@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
35891 struct device_attribute *attr, char *buf)
35892 {
35893 struct uio_device *idev = dev_get_drvdata(dev);
35894- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35895+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35896 }
35897
35898 static struct device_attribute uio_class_attributes[] = {
35899@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
35900 {
35901 struct uio_device *idev = info->uio_dev;
35902
35903- atomic_inc(&idev->event);
35904+ atomic_inc_unchecked(&idev->event);
35905 wake_up_interruptible(&idev->wait);
35906 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35907 }
35908@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
35909 }
35910
35911 listener->dev = idev;
35912- listener->event_count = atomic_read(&idev->event);
35913+ listener->event_count = atomic_read_unchecked(&idev->event);
35914 filep->private_data = listener;
35915
35916 if (idev->info->open) {
35917@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
35918 return -EIO;
35919
35920 poll_wait(filep, &idev->wait, wait);
35921- if (listener->event_count != atomic_read(&idev->event))
35922+ if (listener->event_count != atomic_read_unchecked(&idev->event))
35923 return POLLIN | POLLRDNORM;
35924 return 0;
35925 }
35926@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
35927 do {
35928 set_current_state(TASK_INTERRUPTIBLE);
35929
35930- event_count = atomic_read(&idev->event);
35931+ event_count = atomic_read_unchecked(&idev->event);
35932 if (event_count != listener->event_count) {
35933 if (copy_to_user(buf, &event_count, count))
35934 retval = -EFAULT;
35935@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
35936 static void uio_vma_open(struct vm_area_struct *vma)
35937 {
35938 struct uio_device *idev = vma->vm_private_data;
35939- idev->vma_count++;
35940+ local_inc(&idev->vma_count);
35941 }
35942
35943 static void uio_vma_close(struct vm_area_struct *vma)
35944 {
35945 struct uio_device *idev = vma->vm_private_data;
35946- idev->vma_count--;
35947+ local_dec(&idev->vma_count);
35948 }
35949
35950 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35951@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
35952 idev->owner = owner;
35953 idev->info = info;
35954 init_waitqueue_head(&idev->wait);
35955- atomic_set(&idev->event, 0);
35956+ atomic_set_unchecked(&idev->event, 0);
35957
35958 ret = uio_get_minor(idev);
35959 if (ret)
35960diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
35961index a845f8b..4f54072 100644
35962--- a/drivers/usb/atm/cxacru.c
35963+++ b/drivers/usb/atm/cxacru.c
35964@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
35965 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
35966 if (ret < 2)
35967 return -EINVAL;
35968- if (index < 0 || index > 0x7f)
35969+ if (index > 0x7f)
35970 return -EINVAL;
35971 pos += tmp;
35972
35973diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
35974index d3448ca..d2864ca 100644
35975--- a/drivers/usb/atm/usbatm.c
35976+++ b/drivers/usb/atm/usbatm.c
35977@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
35978 if (printk_ratelimit())
35979 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35980 __func__, vpi, vci);
35981- atomic_inc(&vcc->stats->rx_err);
35982+ atomic_inc_unchecked(&vcc->stats->rx_err);
35983 return;
35984 }
35985
35986@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
35987 if (length > ATM_MAX_AAL5_PDU) {
35988 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35989 __func__, length, vcc);
35990- atomic_inc(&vcc->stats->rx_err);
35991+ atomic_inc_unchecked(&vcc->stats->rx_err);
35992 goto out;
35993 }
35994
35995@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
35996 if (sarb->len < pdu_length) {
35997 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35998 __func__, pdu_length, sarb->len, vcc);
35999- atomic_inc(&vcc->stats->rx_err);
36000+ atomic_inc_unchecked(&vcc->stats->rx_err);
36001 goto out;
36002 }
36003
36004 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36005 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36006 __func__, vcc);
36007- atomic_inc(&vcc->stats->rx_err);
36008+ atomic_inc_unchecked(&vcc->stats->rx_err);
36009 goto out;
36010 }
36011
36012@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36013 if (printk_ratelimit())
36014 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36015 __func__, length);
36016- atomic_inc(&vcc->stats->rx_drop);
36017+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36018 goto out;
36019 }
36020
36021@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36022
36023 vcc->push(vcc, skb);
36024
36025- atomic_inc(&vcc->stats->rx);
36026+ atomic_inc_unchecked(&vcc->stats->rx);
36027 out:
36028 skb_trim(sarb, 0);
36029 }
36030@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36031 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36032
36033 usbatm_pop(vcc, skb);
36034- atomic_inc(&vcc->stats->tx);
36035+ atomic_inc_unchecked(&vcc->stats->tx);
36036
36037 skb = skb_dequeue(&instance->sndqueue);
36038 }
36039@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36040 if (!left--)
36041 return sprintf(page,
36042 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36043- atomic_read(&atm_dev->stats.aal5.tx),
36044- atomic_read(&atm_dev->stats.aal5.tx_err),
36045- atomic_read(&atm_dev->stats.aal5.rx),
36046- atomic_read(&atm_dev->stats.aal5.rx_err),
36047- atomic_read(&atm_dev->stats.aal5.rx_drop));
36048+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36049+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36050+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36051+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36052+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36053
36054 if (!left--) {
36055 if (instance->disconnected)
36056diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36057index d956965..4179a77 100644
36058--- a/drivers/usb/core/devices.c
36059+++ b/drivers/usb/core/devices.c
36060@@ -126,7 +126,7 @@ static const char format_endpt[] =
36061 * time it gets called.
36062 */
36063 static struct device_connect_event {
36064- atomic_t count;
36065+ atomic_unchecked_t count;
36066 wait_queue_head_t wait;
36067 } device_event = {
36068 .count = ATOMIC_INIT(1),
36069@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36070
36071 void usbfs_conn_disc_event(void)
36072 {
36073- atomic_add(2, &device_event.count);
36074+ atomic_add_unchecked(2, &device_event.count);
36075 wake_up(&device_event.wait);
36076 }
36077
36078@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36079
36080 poll_wait(file, &device_event.wait, wait);
36081
36082- event_count = atomic_read(&device_event.count);
36083+ event_count = atomic_read_unchecked(&device_event.count);
36084 if (file->f_version != event_count) {
36085 file->f_version = event_count;
36086 return POLLIN | POLLRDNORM;
36087diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36088index b3bdfed..a9460e0 100644
36089--- a/drivers/usb/core/message.c
36090+++ b/drivers/usb/core/message.c
36091@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36092 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36093 if (buf) {
36094 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36095- if (len > 0) {
36096- smallbuf = kmalloc(++len, GFP_NOIO);
36097+ if (len++ > 0) {
36098+ smallbuf = kmalloc(len, GFP_NOIO);
36099 if (!smallbuf)
36100 return buf;
36101 memcpy(smallbuf, buf, len);
36102diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36103index 1fc8f12..20647c1 100644
36104--- a/drivers/usb/early/ehci-dbgp.c
36105+++ b/drivers/usb/early/ehci-dbgp.c
36106@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36107
36108 #ifdef CONFIG_KGDB
36109 static struct kgdb_io kgdbdbgp_io_ops;
36110-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36111+static struct kgdb_io kgdbdbgp_io_ops_console;
36112+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36113 #else
36114 #define dbgp_kgdb_mode (0)
36115 #endif
36116@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36117 .write_char = kgdbdbgp_write_char,
36118 };
36119
36120+static struct kgdb_io kgdbdbgp_io_ops_console = {
36121+ .name = "kgdbdbgp",
36122+ .read_char = kgdbdbgp_read_char,
36123+ .write_char = kgdbdbgp_write_char,
36124+ .is_console = 1
36125+};
36126+
36127 static int kgdbdbgp_wait_time;
36128
36129 static int __init kgdbdbgp_parse_config(char *str)
36130@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36131 ptr++;
36132 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36133 }
36134- kgdb_register_io_module(&kgdbdbgp_io_ops);
36135- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36136+ if (early_dbgp_console.index != -1)
36137+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36138+ else
36139+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36140
36141 return 0;
36142 }
36143diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36144index d6bea3e..60b250e 100644
36145--- a/drivers/usb/wusbcore/wa-hc.h
36146+++ b/drivers/usb/wusbcore/wa-hc.h
36147@@ -192,7 +192,7 @@ struct wahc {
36148 struct list_head xfer_delayed_list;
36149 spinlock_t xfer_list_lock;
36150 struct work_struct xfer_work;
36151- atomic_t xfer_id_count;
36152+ atomic_unchecked_t xfer_id_count;
36153 };
36154
36155
36156@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36157 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36158 spin_lock_init(&wa->xfer_list_lock);
36159 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36160- atomic_set(&wa->xfer_id_count, 1);
36161+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36162 }
36163
36164 /**
36165diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36166index 57c01ab..8a05959 100644
36167--- a/drivers/usb/wusbcore/wa-xfer.c
36168+++ b/drivers/usb/wusbcore/wa-xfer.c
36169@@ -296,7 +296,7 @@ out:
36170 */
36171 static void wa_xfer_id_init(struct wa_xfer *xfer)
36172 {
36173- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36174+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36175 }
36176
36177 /*
36178diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36179index c14c42b..f955cc2 100644
36180--- a/drivers/vhost/vhost.c
36181+++ b/drivers/vhost/vhost.c
36182@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36183 return 0;
36184 }
36185
36186-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36187+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36188 {
36189 struct file *eventfp, *filep = NULL,
36190 *pollstart = NULL, *pollstop = NULL;
36191diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36192index b0b2ac3..89a4399 100644
36193--- a/drivers/video/aty/aty128fb.c
36194+++ b/drivers/video/aty/aty128fb.c
36195@@ -148,7 +148,7 @@ enum {
36196 };
36197
36198 /* Must match above enum */
36199-static const char *r128_family[] __devinitdata = {
36200+static const char *r128_family[] __devinitconst = {
36201 "AGP",
36202 "PCI",
36203 "PRO AGP",
36204diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36205index 5c3960d..15cf8fc 100644
36206--- a/drivers/video/fbcmap.c
36207+++ b/drivers/video/fbcmap.c
36208@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36209 rc = -ENODEV;
36210 goto out;
36211 }
36212- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36213- !info->fbops->fb_setcmap)) {
36214+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36215 rc = -EINVAL;
36216 goto out1;
36217 }
36218diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36219index ad93629..e020fc3 100644
36220--- a/drivers/video/fbmem.c
36221+++ b/drivers/video/fbmem.c
36222@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36223 image->dx += image->width + 8;
36224 }
36225 } else if (rotate == FB_ROTATE_UD) {
36226- for (x = 0; x < num && image->dx >= 0; x++) {
36227+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36228 info->fbops->fb_imageblit(info, image);
36229 image->dx -= image->width + 8;
36230 }
36231@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36232 image->dy += image->height + 8;
36233 }
36234 } else if (rotate == FB_ROTATE_CCW) {
36235- for (x = 0; x < num && image->dy >= 0; x++) {
36236+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36237 info->fbops->fb_imageblit(info, image);
36238 image->dy -= image->height + 8;
36239 }
36240@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36241 return -EFAULT;
36242 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36243 return -EINVAL;
36244- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36245+ if (con2fb.framebuffer >= FB_MAX)
36246 return -EINVAL;
36247 if (!registered_fb[con2fb.framebuffer])
36248 request_module("fb%d", con2fb.framebuffer);
36249diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36250index 5a5d092..265c5ed 100644
36251--- a/drivers/video/geode/gx1fb_core.c
36252+++ b/drivers/video/geode/gx1fb_core.c
36253@@ -29,7 +29,7 @@ static int crt_option = 1;
36254 static char panel_option[32] = "";
36255
36256 /* Modes relevant to the GX1 (taken from modedb.c) */
36257-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36258+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36259 /* 640x480-60 VESA */
36260 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36261 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36262diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36263index 0fad23f..0e9afa4 100644
36264--- a/drivers/video/gxt4500.c
36265+++ b/drivers/video/gxt4500.c
36266@@ -156,7 +156,7 @@ struct gxt4500_par {
36267 static char *mode_option;
36268
36269 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36270-static const struct fb_videomode defaultmode __devinitdata = {
36271+static const struct fb_videomode defaultmode __devinitconst = {
36272 .refresh = 60,
36273 .xres = 1280,
36274 .yres = 1024,
36275@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36276 return 0;
36277 }
36278
36279-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36280+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36281 .id = "IBM GXT4500P",
36282 .type = FB_TYPE_PACKED_PIXELS,
36283 .visual = FB_VISUAL_PSEUDOCOLOR,
36284diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36285index 7672d2e..b56437f 100644
36286--- a/drivers/video/i810/i810_accel.c
36287+++ b/drivers/video/i810/i810_accel.c
36288@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36289 }
36290 }
36291 printk("ringbuffer lockup!!!\n");
36292+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36293 i810_report_error(mmio);
36294 par->dev_flags |= LOCKUP;
36295 info->pixmap.scan_align = 1;
36296diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36297index 318f6fb..9a389c1 100644
36298--- a/drivers/video/i810/i810_main.c
36299+++ b/drivers/video/i810/i810_main.c
36300@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36301 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36302
36303 /* PCI */
36304-static const char *i810_pci_list[] __devinitdata = {
36305+static const char *i810_pci_list[] __devinitconst = {
36306 "Intel(R) 810 Framebuffer Device" ,
36307 "Intel(R) 810-DC100 Framebuffer Device" ,
36308 "Intel(R) 810E Framebuffer Device" ,
36309diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36310index de36693..3c63fc2 100644
36311--- a/drivers/video/jz4740_fb.c
36312+++ b/drivers/video/jz4740_fb.c
36313@@ -136,7 +136,7 @@ struct jzfb {
36314 uint32_t pseudo_palette[16];
36315 };
36316
36317-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36318+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36319 .id = "JZ4740 FB",
36320 .type = FB_TYPE_PACKED_PIXELS,
36321 .visual = FB_VISUAL_TRUECOLOR,
36322diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36323index 3c14e43..eafa544 100644
36324--- a/drivers/video/logo/logo_linux_clut224.ppm
36325+++ b/drivers/video/logo/logo_linux_clut224.ppm
36326@@ -1,1604 +1,1123 @@
36327 P3
36328-# Standard 224-color Linux logo
36329 80 80
36330 255
36331- 0 0 0 0 0 0 0 0 0 0 0 0
36332- 0 0 0 0 0 0 0 0 0 0 0 0
36333- 0 0 0 0 0 0 0 0 0 0 0 0
36334- 0 0 0 0 0 0 0 0 0 0 0 0
36335- 0 0 0 0 0 0 0 0 0 0 0 0
36336- 0 0 0 0 0 0 0 0 0 0 0 0
36337- 0 0 0 0 0 0 0 0 0 0 0 0
36338- 0 0 0 0 0 0 0 0 0 0 0 0
36339- 0 0 0 0 0 0 0 0 0 0 0 0
36340- 6 6 6 6 6 6 10 10 10 10 10 10
36341- 10 10 10 6 6 6 6 6 6 6 6 6
36342- 0 0 0 0 0 0 0 0 0 0 0 0
36343- 0 0 0 0 0 0 0 0 0 0 0 0
36344- 0 0 0 0 0 0 0 0 0 0 0 0
36345- 0 0 0 0 0 0 0 0 0 0 0 0
36346- 0 0 0 0 0 0 0 0 0 0 0 0
36347- 0 0 0 0 0 0 0 0 0 0 0 0
36348- 0 0 0 0 0 0 0 0 0 0 0 0
36349- 0 0 0 0 0 0 0 0 0 0 0 0
36350- 0 0 0 0 0 0 0 0 0 0 0 0
36351- 0 0 0 0 0 0 0 0 0 0 0 0
36352- 0 0 0 0 0 0 0 0 0 0 0 0
36353- 0 0 0 0 0 0 0 0 0 0 0 0
36354- 0 0 0 0 0 0 0 0 0 0 0 0
36355- 0 0 0 0 0 0 0 0 0 0 0 0
36356- 0 0 0 0 0 0 0 0 0 0 0 0
36357- 0 0 0 0 0 0 0 0 0 0 0 0
36358- 0 0 0 0 0 0 0 0 0 0 0 0
36359- 0 0 0 6 6 6 10 10 10 14 14 14
36360- 22 22 22 26 26 26 30 30 30 34 34 34
36361- 30 30 30 30 30 30 26 26 26 18 18 18
36362- 14 14 14 10 10 10 6 6 6 0 0 0
36363- 0 0 0 0 0 0 0 0 0 0 0 0
36364- 0 0 0 0 0 0 0 0 0 0 0 0
36365- 0 0 0 0 0 0 0 0 0 0 0 0
36366- 0 0 0 0 0 0 0 0 0 0 0 0
36367- 0 0 0 0 0 0 0 0 0 0 0 0
36368- 0 0 0 0 0 0 0 0 0 0 0 0
36369- 0 0 0 0 0 0 0 0 0 0 0 0
36370- 0 0 0 0 0 0 0 0 0 0 0 0
36371- 0 0 0 0 0 0 0 0 0 0 0 0
36372- 0 0 0 0 0 1 0 0 1 0 0 0
36373- 0 0 0 0 0 0 0 0 0 0 0 0
36374- 0 0 0 0 0 0 0 0 0 0 0 0
36375- 0 0 0 0 0 0 0 0 0 0 0 0
36376- 0 0 0 0 0 0 0 0 0 0 0 0
36377- 0 0 0 0 0 0 0 0 0 0 0 0
36378- 0 0 0 0 0 0 0 0 0 0 0 0
36379- 6 6 6 14 14 14 26 26 26 42 42 42
36380- 54 54 54 66 66 66 78 78 78 78 78 78
36381- 78 78 78 74 74 74 66 66 66 54 54 54
36382- 42 42 42 26 26 26 18 18 18 10 10 10
36383- 6 6 6 0 0 0 0 0 0 0 0 0
36384- 0 0 0 0 0 0 0 0 0 0 0 0
36385- 0 0 0 0 0 0 0 0 0 0 0 0
36386- 0 0 0 0 0 0 0 0 0 0 0 0
36387- 0 0 0 0 0 0 0 0 0 0 0 0
36388- 0 0 0 0 0 0 0 0 0 0 0 0
36389- 0 0 0 0 0 0 0 0 0 0 0 0
36390- 0 0 0 0 0 0 0 0 0 0 0 0
36391- 0 0 0 0 0 0 0 0 0 0 0 0
36392- 0 0 1 0 0 0 0 0 0 0 0 0
36393- 0 0 0 0 0 0 0 0 0 0 0 0
36394- 0 0 0 0 0 0 0 0 0 0 0 0
36395- 0 0 0 0 0 0 0 0 0 0 0 0
36396- 0 0 0 0 0 0 0 0 0 0 0 0
36397- 0 0 0 0 0 0 0 0 0 0 0 0
36398- 0 0 0 0 0 0 0 0 0 10 10 10
36399- 22 22 22 42 42 42 66 66 66 86 86 86
36400- 66 66 66 38 38 38 38 38 38 22 22 22
36401- 26 26 26 34 34 34 54 54 54 66 66 66
36402- 86 86 86 70 70 70 46 46 46 26 26 26
36403- 14 14 14 6 6 6 0 0 0 0 0 0
36404- 0 0 0 0 0 0 0 0 0 0 0 0
36405- 0 0 0 0 0 0 0 0 0 0 0 0
36406- 0 0 0 0 0 0 0 0 0 0 0 0
36407- 0 0 0 0 0 0 0 0 0 0 0 0
36408- 0 0 0 0 0 0 0 0 0 0 0 0
36409- 0 0 0 0 0 0 0 0 0 0 0 0
36410- 0 0 0 0 0 0 0 0 0 0 0 0
36411- 0 0 0 0 0 0 0 0 0 0 0 0
36412- 0 0 1 0 0 1 0 0 1 0 0 0
36413- 0 0 0 0 0 0 0 0 0 0 0 0
36414- 0 0 0 0 0 0 0 0 0 0 0 0
36415- 0 0 0 0 0 0 0 0 0 0 0 0
36416- 0 0 0 0 0 0 0 0 0 0 0 0
36417- 0 0 0 0 0 0 0 0 0 0 0 0
36418- 0 0 0 0 0 0 10 10 10 26 26 26
36419- 50 50 50 82 82 82 58 58 58 6 6 6
36420- 2 2 6 2 2 6 2 2 6 2 2 6
36421- 2 2 6 2 2 6 2 2 6 2 2 6
36422- 6 6 6 54 54 54 86 86 86 66 66 66
36423- 38 38 38 18 18 18 6 6 6 0 0 0
36424- 0 0 0 0 0 0 0 0 0 0 0 0
36425- 0 0 0 0 0 0 0 0 0 0 0 0
36426- 0 0 0 0 0 0 0 0 0 0 0 0
36427- 0 0 0 0 0 0 0 0 0 0 0 0
36428- 0 0 0 0 0 0 0 0 0 0 0 0
36429- 0 0 0 0 0 0 0 0 0 0 0 0
36430- 0 0 0 0 0 0 0 0 0 0 0 0
36431- 0 0 0 0 0 0 0 0 0 0 0 0
36432- 0 0 0 0 0 0 0 0 0 0 0 0
36433- 0 0 0 0 0 0 0 0 0 0 0 0
36434- 0 0 0 0 0 0 0 0 0 0 0 0
36435- 0 0 0 0 0 0 0 0 0 0 0 0
36436- 0 0 0 0 0 0 0 0 0 0 0 0
36437- 0 0 0 0 0 0 0 0 0 0 0 0
36438- 0 0 0 6 6 6 22 22 22 50 50 50
36439- 78 78 78 34 34 34 2 2 6 2 2 6
36440- 2 2 6 2 2 6 2 2 6 2 2 6
36441- 2 2 6 2 2 6 2 2 6 2 2 6
36442- 2 2 6 2 2 6 6 6 6 70 70 70
36443- 78 78 78 46 46 46 22 22 22 6 6 6
36444- 0 0 0 0 0 0 0 0 0 0 0 0
36445- 0 0 0 0 0 0 0 0 0 0 0 0
36446- 0 0 0 0 0 0 0 0 0 0 0 0
36447- 0 0 0 0 0 0 0 0 0 0 0 0
36448- 0 0 0 0 0 0 0 0 0 0 0 0
36449- 0 0 0 0 0 0 0 0 0 0 0 0
36450- 0 0 0 0 0 0 0 0 0 0 0 0
36451- 0 0 0 0 0 0 0 0 0 0 0 0
36452- 0 0 1 0 0 1 0 0 1 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 0 0 0
36458- 6 6 6 18 18 18 42 42 42 82 82 82
36459- 26 26 26 2 2 6 2 2 6 2 2 6
36460- 2 2 6 2 2 6 2 2 6 2 2 6
36461- 2 2 6 2 2 6 2 2 6 14 14 14
36462- 46 46 46 34 34 34 6 6 6 2 2 6
36463- 42 42 42 78 78 78 42 42 42 18 18 18
36464- 6 6 6 0 0 0 0 0 0 0 0 0
36465- 0 0 0 0 0 0 0 0 0 0 0 0
36466- 0 0 0 0 0 0 0 0 0 0 0 0
36467- 0 0 0 0 0 0 0 0 0 0 0 0
36468- 0 0 0 0 0 0 0 0 0 0 0 0
36469- 0 0 0 0 0 0 0 0 0 0 0 0
36470- 0 0 0 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 1 0 0 0 0 0 1 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 0 0 0
36477- 0 0 0 0 0 0 0 0 0 0 0 0
36478- 10 10 10 30 30 30 66 66 66 58 58 58
36479- 2 2 6 2 2 6 2 2 6 2 2 6
36480- 2 2 6 2 2 6 2 2 6 2 2 6
36481- 2 2 6 2 2 6 2 2 6 26 26 26
36482- 86 86 86 101 101 101 46 46 46 10 10 10
36483- 2 2 6 58 58 58 70 70 70 34 34 34
36484- 10 10 10 0 0 0 0 0 0 0 0 0
36485- 0 0 0 0 0 0 0 0 0 0 0 0
36486- 0 0 0 0 0 0 0 0 0 0 0 0
36487- 0 0 0 0 0 0 0 0 0 0 0 0
36488- 0 0 0 0 0 0 0 0 0 0 0 0
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 1 0 0 1 0 0 1 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 0 0 0 0 0 0 0 0 0 0 0 0
36498- 14 14 14 42 42 42 86 86 86 10 10 10
36499- 2 2 6 2 2 6 2 2 6 2 2 6
36500- 2 2 6 2 2 6 2 2 6 2 2 6
36501- 2 2 6 2 2 6 2 2 6 30 30 30
36502- 94 94 94 94 94 94 58 58 58 26 26 26
36503- 2 2 6 6 6 6 78 78 78 54 54 54
36504- 22 22 22 6 6 6 0 0 0 0 0 0
36505- 0 0 0 0 0 0 0 0 0 0 0 0
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 0 0 0 0 0 0 0 0 0
36517- 0 0 0 0 0 0 0 0 0 6 6 6
36518- 22 22 22 62 62 62 62 62 62 2 2 6
36519- 2 2 6 2 2 6 2 2 6 2 2 6
36520- 2 2 6 2 2 6 2 2 6 2 2 6
36521- 2 2 6 2 2 6 2 2 6 26 26 26
36522- 54 54 54 38 38 38 18 18 18 10 10 10
36523- 2 2 6 2 2 6 34 34 34 82 82 82
36524- 38 38 38 14 14 14 0 0 0 0 0 0
36525- 0 0 0 0 0 0 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 1 0 0 1 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 0 0 0 0 0 0 0 0 0 0 0 0
36537- 0 0 0 0 0 0 0 0 0 6 6 6
36538- 30 30 30 78 78 78 30 30 30 2 2 6
36539- 2 2 6 2 2 6 2 2 6 2 2 6
36540- 2 2 6 2 2 6 2 2 6 2 2 6
36541- 2 2 6 2 2 6 2 2 6 10 10 10
36542- 10 10 10 2 2 6 2 2 6 2 2 6
36543- 2 2 6 2 2 6 2 2 6 78 78 78
36544- 50 50 50 18 18 18 6 6 6 0 0 0
36545- 0 0 0 0 0 0 0 0 0 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 1 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 0 0 0 0 0 0 0 0 0 0 0 0
36557- 0 0 0 0 0 0 0 0 0 10 10 10
36558- 38 38 38 86 86 86 14 14 14 2 2 6
36559- 2 2 6 2 2 6 2 2 6 2 2 6
36560- 2 2 6 2 2 6 2 2 6 2 2 6
36561- 2 2 6 2 2 6 2 2 6 2 2 6
36562- 2 2 6 2 2 6 2 2 6 2 2 6
36563- 2 2 6 2 2 6 2 2 6 54 54 54
36564- 66 66 66 26 26 26 6 6 6 0 0 0
36565- 0 0 0 0 0 0 0 0 0 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 1 0 0 1 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 0 0 0 0 0 0 0 0 0 0 0 0
36577- 0 0 0 0 0 0 0 0 0 14 14 14
36578- 42 42 42 82 82 82 2 2 6 2 2 6
36579- 2 2 6 6 6 6 10 10 10 2 2 6
36580- 2 2 6 2 2 6 2 2 6 2 2 6
36581- 2 2 6 2 2 6 2 2 6 6 6 6
36582- 14 14 14 10 10 10 2 2 6 2 2 6
36583- 2 2 6 2 2 6 2 2 6 18 18 18
36584- 82 82 82 34 34 34 10 10 10 0 0 0
36585- 0 0 0 0 0 0 0 0 0 0 0 0
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 1 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 0 0 0
36596- 0 0 0 0 0 0 0 0 0 0 0 0
36597- 0 0 0 0 0 0 0 0 0 14 14 14
36598- 46 46 46 86 86 86 2 2 6 2 2 6
36599- 6 6 6 6 6 6 22 22 22 34 34 34
36600- 6 6 6 2 2 6 2 2 6 2 2 6
36601- 2 2 6 2 2 6 18 18 18 34 34 34
36602- 10 10 10 50 50 50 22 22 22 2 2 6
36603- 2 2 6 2 2 6 2 2 6 10 10 10
36604- 86 86 86 42 42 42 14 14 14 0 0 0
36605- 0 0 0 0 0 0 0 0 0 0 0 0
36606- 0 0 0 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 1 0 0 1 0 0 1 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 0 0 0
36616- 0 0 0 0 0 0 0 0 0 0 0 0
36617- 0 0 0 0 0 0 0 0 0 14 14 14
36618- 46 46 46 86 86 86 2 2 6 2 2 6
36619- 38 38 38 116 116 116 94 94 94 22 22 22
36620- 22 22 22 2 2 6 2 2 6 2 2 6
36621- 14 14 14 86 86 86 138 138 138 162 162 162
36622-154 154 154 38 38 38 26 26 26 6 6 6
36623- 2 2 6 2 2 6 2 2 6 2 2 6
36624- 86 86 86 46 46 46 14 14 14 0 0 0
36625- 0 0 0 0 0 0 0 0 0 0 0 0
36626- 0 0 0 0 0 0 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 0 0 0
36636- 0 0 0 0 0 0 0 0 0 0 0 0
36637- 0 0 0 0 0 0 0 0 0 14 14 14
36638- 46 46 46 86 86 86 2 2 6 14 14 14
36639-134 134 134 198 198 198 195 195 195 116 116 116
36640- 10 10 10 2 2 6 2 2 6 6 6 6
36641-101 98 89 187 187 187 210 210 210 218 218 218
36642-214 214 214 134 134 134 14 14 14 6 6 6
36643- 2 2 6 2 2 6 2 2 6 2 2 6
36644- 86 86 86 50 50 50 18 18 18 6 6 6
36645- 0 0 0 0 0 0 0 0 0 0 0 0
36646- 0 0 0 0 0 0 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 1 0 0 0
36652- 0 0 1 0 0 1 0 0 1 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 0 0 0 0 0 0
36657- 0 0 0 0 0 0 0 0 0 14 14 14
36658- 46 46 46 86 86 86 2 2 6 54 54 54
36659-218 218 218 195 195 195 226 226 226 246 246 246
36660- 58 58 58 2 2 6 2 2 6 30 30 30
36661-210 210 210 253 253 253 174 174 174 123 123 123
36662-221 221 221 234 234 234 74 74 74 2 2 6
36663- 2 2 6 2 2 6 2 2 6 2 2 6
36664- 70 70 70 58 58 58 22 22 22 6 6 6
36665- 0 0 0 0 0 0 0 0 0 0 0 0
36666- 0 0 0 0 0 0 0 0 0 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 0 0 0 0 0 0 0 0 0
36677- 0 0 0 0 0 0 0 0 0 14 14 14
36678- 46 46 46 82 82 82 2 2 6 106 106 106
36679-170 170 170 26 26 26 86 86 86 226 226 226
36680-123 123 123 10 10 10 14 14 14 46 46 46
36681-231 231 231 190 190 190 6 6 6 70 70 70
36682- 90 90 90 238 238 238 158 158 158 2 2 6
36683- 2 2 6 2 2 6 2 2 6 2 2 6
36684- 70 70 70 58 58 58 22 22 22 6 6 6
36685- 0 0 0 0 0 0 0 0 0 0 0 0
36686- 0 0 0 0 0 0 0 0 0 0 0 0
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 1 0 0 0
36692- 0 0 1 0 0 1 0 0 1 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 0 0 0 0 0 0 0 0 0 0 0 0
36697- 0 0 0 0 0 0 0 0 0 14 14 14
36698- 42 42 42 86 86 86 6 6 6 116 116 116
36699-106 106 106 6 6 6 70 70 70 149 149 149
36700-128 128 128 18 18 18 38 38 38 54 54 54
36701-221 221 221 106 106 106 2 2 6 14 14 14
36702- 46 46 46 190 190 190 198 198 198 2 2 6
36703- 2 2 6 2 2 6 2 2 6 2 2 6
36704- 74 74 74 62 62 62 22 22 22 6 6 6
36705- 0 0 0 0 0 0 0 0 0 0 0 0
36706- 0 0 0 0 0 0 0 0 0 0 0 0
36707- 0 0 0 0 0 0 0 0 0 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 1 0 0 0
36712- 0 0 1 0 0 0 0 0 1 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 0 0 0 0 0 0 0 0 0 0 0 0
36717- 0 0 0 0 0 0 0 0 0 14 14 14
36718- 42 42 42 94 94 94 14 14 14 101 101 101
36719-128 128 128 2 2 6 18 18 18 116 116 116
36720-118 98 46 121 92 8 121 92 8 98 78 10
36721-162 162 162 106 106 106 2 2 6 2 2 6
36722- 2 2 6 195 195 195 195 195 195 6 6 6
36723- 2 2 6 2 2 6 2 2 6 2 2 6
36724- 74 74 74 62 62 62 22 22 22 6 6 6
36725- 0 0 0 0 0 0 0 0 0 0 0 0
36726- 0 0 0 0 0 0 0 0 0 0 0 0
36727- 0 0 0 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 0 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 1 0 0 1
36732- 0 0 1 0 0 0 0 0 1 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 0 0 0 0 0 0 0 0 0 0 0 0
36737- 0 0 0 0 0 0 0 0 0 10 10 10
36738- 38 38 38 90 90 90 14 14 14 58 58 58
36739-210 210 210 26 26 26 54 38 6 154 114 10
36740-226 170 11 236 186 11 225 175 15 184 144 12
36741-215 174 15 175 146 61 37 26 9 2 2 6
36742- 70 70 70 246 246 246 138 138 138 2 2 6
36743- 2 2 6 2 2 6 2 2 6 2 2 6
36744- 70 70 70 66 66 66 26 26 26 6 6 6
36745- 0 0 0 0 0 0 0 0 0 0 0 0
36746- 0 0 0 0 0 0 0 0 0 0 0 0
36747- 0 0 0 0 0 0 0 0 0 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 0 0 0
36756- 0 0 0 0 0 0 0 0 0 0 0 0
36757- 0 0 0 0 0 0 0 0 0 10 10 10
36758- 38 38 38 86 86 86 14 14 14 10 10 10
36759-195 195 195 188 164 115 192 133 9 225 175 15
36760-239 182 13 234 190 10 232 195 16 232 200 30
36761-245 207 45 241 208 19 232 195 16 184 144 12
36762-218 194 134 211 206 186 42 42 42 2 2 6
36763- 2 2 6 2 2 6 2 2 6 2 2 6
36764- 50 50 50 74 74 74 30 30 30 6 6 6
36765- 0 0 0 0 0 0 0 0 0 0 0 0
36766- 0 0 0 0 0 0 0 0 0 0 0 0
36767- 0 0 0 0 0 0 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 0 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 0 0 0
36776- 0 0 0 0 0 0 0 0 0 0 0 0
36777- 0 0 0 0 0 0 0 0 0 10 10 10
36778- 34 34 34 86 86 86 14 14 14 2 2 6
36779-121 87 25 192 133 9 219 162 10 239 182 13
36780-236 186 11 232 195 16 241 208 19 244 214 54
36781-246 218 60 246 218 38 246 215 20 241 208 19
36782-241 208 19 226 184 13 121 87 25 2 2 6
36783- 2 2 6 2 2 6 2 2 6 2 2 6
36784- 50 50 50 82 82 82 34 34 34 10 10 10
36785- 0 0 0 0 0 0 0 0 0 0 0 0
36786- 0 0 0 0 0 0 0 0 0 0 0 0
36787- 0 0 0 0 0 0 0 0 0 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 0 0 0
36796- 0 0 0 0 0 0 0 0 0 0 0 0
36797- 0 0 0 0 0 0 0 0 0 10 10 10
36798- 34 34 34 82 82 82 30 30 30 61 42 6
36799-180 123 7 206 145 10 230 174 11 239 182 13
36800-234 190 10 238 202 15 241 208 19 246 218 74
36801-246 218 38 246 215 20 246 215 20 246 215 20
36802-226 184 13 215 174 15 184 144 12 6 6 6
36803- 2 2 6 2 2 6 2 2 6 2 2 6
36804- 26 26 26 94 94 94 42 42 42 14 14 14
36805- 0 0 0 0 0 0 0 0 0 0 0 0
36806- 0 0 0 0 0 0 0 0 0 0 0 0
36807- 0 0 0 0 0 0 0 0 0 0 0 0
36808- 0 0 0 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 0 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 0 0 0 0 0 0
36816- 0 0 0 0 0 0 0 0 0 0 0 0
36817- 0 0 0 0 0 0 0 0 0 10 10 10
36818- 30 30 30 78 78 78 50 50 50 104 69 6
36819-192 133 9 216 158 10 236 178 12 236 186 11
36820-232 195 16 241 208 19 244 214 54 245 215 43
36821-246 215 20 246 215 20 241 208 19 198 155 10
36822-200 144 11 216 158 10 156 118 10 2 2 6
36823- 2 2 6 2 2 6 2 2 6 2 2 6
36824- 6 6 6 90 90 90 54 54 54 18 18 18
36825- 6 6 6 0 0 0 0 0 0 0 0 0
36826- 0 0 0 0 0 0 0 0 0 0 0 0
36827- 0 0 0 0 0 0 0 0 0 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 0 0 0 0 0 0
36836- 0 0 0 0 0 0 0 0 0 0 0 0
36837- 0 0 0 0 0 0 0 0 0 10 10 10
36838- 30 30 30 78 78 78 46 46 46 22 22 22
36839-137 92 6 210 162 10 239 182 13 238 190 10
36840-238 202 15 241 208 19 246 215 20 246 215 20
36841-241 208 19 203 166 17 185 133 11 210 150 10
36842-216 158 10 210 150 10 102 78 10 2 2 6
36843- 6 6 6 54 54 54 14 14 14 2 2 6
36844- 2 2 6 62 62 62 74 74 74 30 30 30
36845- 10 10 10 0 0 0 0 0 0 0 0 0
36846- 0 0 0 0 0 0 0 0 0 0 0 0
36847- 0 0 0 0 0 0 0 0 0 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 0 0 0 0 0 0 0 0 0
36856- 0 0 0 0 0 0 0 0 0 0 0 0
36857- 0 0 0 0 0 0 0 0 0 10 10 10
36858- 34 34 34 78 78 78 50 50 50 6 6 6
36859- 94 70 30 139 102 15 190 146 13 226 184 13
36860-232 200 30 232 195 16 215 174 15 190 146 13
36861-168 122 10 192 133 9 210 150 10 213 154 11
36862-202 150 34 182 157 106 101 98 89 2 2 6
36863- 2 2 6 78 78 78 116 116 116 58 58 58
36864- 2 2 6 22 22 22 90 90 90 46 46 46
36865- 18 18 18 6 6 6 0 0 0 0 0 0
36866- 0 0 0 0 0 0 0 0 0 0 0 0
36867- 0 0 0 0 0 0 0 0 0 0 0 0
36868- 0 0 0 0 0 0 0 0 0 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 0 0 0 0 0 0 0 0 0
36876- 0 0 0 0 0 0 0 0 0 0 0 0
36877- 0 0 0 0 0 0 0 0 0 10 10 10
36878- 38 38 38 86 86 86 50 50 50 6 6 6
36879-128 128 128 174 154 114 156 107 11 168 122 10
36880-198 155 10 184 144 12 197 138 11 200 144 11
36881-206 145 10 206 145 10 197 138 11 188 164 115
36882-195 195 195 198 198 198 174 174 174 14 14 14
36883- 2 2 6 22 22 22 116 116 116 116 116 116
36884- 22 22 22 2 2 6 74 74 74 70 70 70
36885- 30 30 30 10 10 10 0 0 0 0 0 0
36886- 0 0 0 0 0 0 0 0 0 0 0 0
36887- 0 0 0 0 0 0 0 0 0 0 0 0
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 0 0 0 0 0 0 0 0 0 0 0 0
36896- 0 0 0 0 0 0 0 0 0 0 0 0
36897- 0 0 0 0 0 0 6 6 6 18 18 18
36898- 50 50 50 101 101 101 26 26 26 10 10 10
36899-138 138 138 190 190 190 174 154 114 156 107 11
36900-197 138 11 200 144 11 197 138 11 192 133 9
36901-180 123 7 190 142 34 190 178 144 187 187 187
36902-202 202 202 221 221 221 214 214 214 66 66 66
36903- 2 2 6 2 2 6 50 50 50 62 62 62
36904- 6 6 6 2 2 6 10 10 10 90 90 90
36905- 50 50 50 18 18 18 6 6 6 0 0 0
36906- 0 0 0 0 0 0 0 0 0 0 0 0
36907- 0 0 0 0 0 0 0 0 0 0 0 0
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 0 0 0 0 0 0 0 0 0 0 0 0
36916- 0 0 0 0 0 0 0 0 0 0 0 0
36917- 0 0 0 0 0 0 10 10 10 34 34 34
36918- 74 74 74 74 74 74 2 2 6 6 6 6
36919-144 144 144 198 198 198 190 190 190 178 166 146
36920-154 121 60 156 107 11 156 107 11 168 124 44
36921-174 154 114 187 187 187 190 190 190 210 210 210
36922-246 246 246 253 253 253 253 253 253 182 182 182
36923- 6 6 6 2 2 6 2 2 6 2 2 6
36924- 2 2 6 2 2 6 2 2 6 62 62 62
36925- 74 74 74 34 34 34 14 14 14 0 0 0
36926- 0 0 0 0 0 0 0 0 0 0 0 0
36927- 0 0 0 0 0 0 0 0 0 0 0 0
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 0 0 0
36935- 0 0 0 0 0 0 0 0 0 0 0 0
36936- 0 0 0 0 0 0 0 0 0 0 0 0
36937- 0 0 0 10 10 10 22 22 22 54 54 54
36938- 94 94 94 18 18 18 2 2 6 46 46 46
36939-234 234 234 221 221 221 190 190 190 190 190 190
36940-190 190 190 187 187 187 187 187 187 190 190 190
36941-190 190 190 195 195 195 214 214 214 242 242 242
36942-253 253 253 253 253 253 253 253 253 253 253 253
36943- 82 82 82 2 2 6 2 2 6 2 2 6
36944- 2 2 6 2 2 6 2 2 6 14 14 14
36945- 86 86 86 54 54 54 22 22 22 6 6 6
36946- 0 0 0 0 0 0 0 0 0 0 0 0
36947- 0 0 0 0 0 0 0 0 0 0 0 0
36948- 0 0 0 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 0 0 0
36955- 0 0 0 0 0 0 0 0 0 0 0 0
36956- 0 0 0 0 0 0 0 0 0 0 0 0
36957- 6 6 6 18 18 18 46 46 46 90 90 90
36958- 46 46 46 18 18 18 6 6 6 182 182 182
36959-253 253 253 246 246 246 206 206 206 190 190 190
36960-190 190 190 190 190 190 190 190 190 190 190 190
36961-206 206 206 231 231 231 250 250 250 253 253 253
36962-253 253 253 253 253 253 253 253 253 253 253 253
36963-202 202 202 14 14 14 2 2 6 2 2 6
36964- 2 2 6 2 2 6 2 2 6 2 2 6
36965- 42 42 42 86 86 86 42 42 42 18 18 18
36966- 6 6 6 0 0 0 0 0 0 0 0 0
36967- 0 0 0 0 0 0 0 0 0 0 0 0
36968- 0 0 0 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 0 0 0
36975- 0 0 0 0 0 0 0 0 0 0 0 0
36976- 0 0 0 0 0 0 0 0 0 6 6 6
36977- 14 14 14 38 38 38 74 74 74 66 66 66
36978- 2 2 6 6 6 6 90 90 90 250 250 250
36979-253 253 253 253 253 253 238 238 238 198 198 198
36980-190 190 190 190 190 190 195 195 195 221 221 221
36981-246 246 246 253 253 253 253 253 253 253 253 253
36982-253 253 253 253 253 253 253 253 253 253 253 253
36983-253 253 253 82 82 82 2 2 6 2 2 6
36984- 2 2 6 2 2 6 2 2 6 2 2 6
36985- 2 2 6 78 78 78 70 70 70 34 34 34
36986- 14 14 14 6 6 6 0 0 0 0 0 0
36987- 0 0 0 0 0 0 0 0 0 0 0 0
36988- 0 0 0 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 0 0 0
36995- 0 0 0 0 0 0 0 0 0 0 0 0
36996- 0 0 0 0 0 0 0 0 0 14 14 14
36997- 34 34 34 66 66 66 78 78 78 6 6 6
36998- 2 2 6 18 18 18 218 218 218 253 253 253
36999-253 253 253 253 253 253 253 253 253 246 246 246
37000-226 226 226 231 231 231 246 246 246 253 253 253
37001-253 253 253 253 253 253 253 253 253 253 253 253
37002-253 253 253 253 253 253 253 253 253 253 253 253
37003-253 253 253 178 178 178 2 2 6 2 2 6
37004- 2 2 6 2 2 6 2 2 6 2 2 6
37005- 2 2 6 18 18 18 90 90 90 62 62 62
37006- 30 30 30 10 10 10 0 0 0 0 0 0
37007- 0 0 0 0 0 0 0 0 0 0 0 0
37008- 0 0 0 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 0 0 0
37015- 0 0 0 0 0 0 0 0 0 0 0 0
37016- 0 0 0 0 0 0 10 10 10 26 26 26
37017- 58 58 58 90 90 90 18 18 18 2 2 6
37018- 2 2 6 110 110 110 253 253 253 253 253 253
37019-253 253 253 253 253 253 253 253 253 253 253 253
37020-250 250 250 253 253 253 253 253 253 253 253 253
37021-253 253 253 253 253 253 253 253 253 253 253 253
37022-253 253 253 253 253 253 253 253 253 253 253 253
37023-253 253 253 231 231 231 18 18 18 2 2 6
37024- 2 2 6 2 2 6 2 2 6 2 2 6
37025- 2 2 6 2 2 6 18 18 18 94 94 94
37026- 54 54 54 26 26 26 10 10 10 0 0 0
37027- 0 0 0 0 0 0 0 0 0 0 0 0
37028- 0 0 0 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 0 0 0
37035- 0 0 0 0 0 0 0 0 0 0 0 0
37036- 0 0 0 6 6 6 22 22 22 50 50 50
37037- 90 90 90 26 26 26 2 2 6 2 2 6
37038- 14 14 14 195 195 195 250 250 250 253 253 253
37039-253 253 253 253 253 253 253 253 253 253 253 253
37040-253 253 253 253 253 253 253 253 253 253 253 253
37041-253 253 253 253 253 253 253 253 253 253 253 253
37042-253 253 253 253 253 253 253 253 253 253 253 253
37043-250 250 250 242 242 242 54 54 54 2 2 6
37044- 2 2 6 2 2 6 2 2 6 2 2 6
37045- 2 2 6 2 2 6 2 2 6 38 38 38
37046- 86 86 86 50 50 50 22 22 22 6 6 6
37047- 0 0 0 0 0 0 0 0 0 0 0 0
37048- 0 0 0 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 0 0 0 0 0 0
37055- 0 0 0 0 0 0 0 0 0 0 0 0
37056- 6 6 6 14 14 14 38 38 38 82 82 82
37057- 34 34 34 2 2 6 2 2 6 2 2 6
37058- 42 42 42 195 195 195 246 246 246 253 253 253
37059-253 253 253 253 253 253 253 253 253 250 250 250
37060-242 242 242 242 242 242 250 250 250 253 253 253
37061-253 253 253 253 253 253 253 253 253 253 253 253
37062-253 253 253 250 250 250 246 246 246 238 238 238
37063-226 226 226 231 231 231 101 101 101 6 6 6
37064- 2 2 6 2 2 6 2 2 6 2 2 6
37065- 2 2 6 2 2 6 2 2 6 2 2 6
37066- 38 38 38 82 82 82 42 42 42 14 14 14
37067- 6 6 6 0 0 0 0 0 0 0 0 0
37068- 0 0 0 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 0 0 0
37074- 0 0 0 0 0 0 0 0 0 0 0 0
37075- 0 0 0 0 0 0 0 0 0 0 0 0
37076- 10 10 10 26 26 26 62 62 62 66 66 66
37077- 2 2 6 2 2 6 2 2 6 6 6 6
37078- 70 70 70 170 170 170 206 206 206 234 234 234
37079-246 246 246 250 250 250 250 250 250 238 238 238
37080-226 226 226 231 231 231 238 238 238 250 250 250
37081-250 250 250 250 250 250 246 246 246 231 231 231
37082-214 214 214 206 206 206 202 202 202 202 202 202
37083-198 198 198 202 202 202 182 182 182 18 18 18
37084- 2 2 6 2 2 6 2 2 6 2 2 6
37085- 2 2 6 2 2 6 2 2 6 2 2 6
37086- 2 2 6 62 62 62 66 66 66 30 30 30
37087- 10 10 10 0 0 0 0 0 0 0 0 0
37088- 0 0 0 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 0 0 0
37094- 0 0 0 0 0 0 0 0 0 0 0 0
37095- 0 0 0 0 0 0 0 0 0 0 0 0
37096- 14 14 14 42 42 42 82 82 82 18 18 18
37097- 2 2 6 2 2 6 2 2 6 10 10 10
37098- 94 94 94 182 182 182 218 218 218 242 242 242
37099-250 250 250 253 253 253 253 253 253 250 250 250
37100-234 234 234 253 253 253 253 253 253 253 253 253
37101-253 253 253 253 253 253 253 253 253 246 246 246
37102-238 238 238 226 226 226 210 210 210 202 202 202
37103-195 195 195 195 195 195 210 210 210 158 158 158
37104- 6 6 6 14 14 14 50 50 50 14 14 14
37105- 2 2 6 2 2 6 2 2 6 2 2 6
37106- 2 2 6 6 6 6 86 86 86 46 46 46
37107- 18 18 18 6 6 6 0 0 0 0 0 0
37108- 0 0 0 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 0 0 0 0 0 0
37114- 0 0 0 0 0 0 0 0 0 0 0 0
37115- 0 0 0 0 0 0 0 0 0 6 6 6
37116- 22 22 22 54 54 54 70 70 70 2 2 6
37117- 2 2 6 10 10 10 2 2 6 22 22 22
37118-166 166 166 231 231 231 250 250 250 253 253 253
37119-253 253 253 253 253 253 253 253 253 250 250 250
37120-242 242 242 253 253 253 253 253 253 253 253 253
37121-253 253 253 253 253 253 253 253 253 253 253 253
37122-253 253 253 253 253 253 253 253 253 246 246 246
37123-231 231 231 206 206 206 198 198 198 226 226 226
37124- 94 94 94 2 2 6 6 6 6 38 38 38
37125- 30 30 30 2 2 6 2 2 6 2 2 6
37126- 2 2 6 2 2 6 62 62 62 66 66 66
37127- 26 26 26 10 10 10 0 0 0 0 0 0
37128- 0 0 0 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 0 0 0 0 0 0
37134- 0 0 0 0 0 0 0 0 0 0 0 0
37135- 0 0 0 0 0 0 0 0 0 10 10 10
37136- 30 30 30 74 74 74 50 50 50 2 2 6
37137- 26 26 26 26 26 26 2 2 6 106 106 106
37138-238 238 238 253 253 253 253 253 253 253 253 253
37139-253 253 253 253 253 253 253 253 253 253 253 253
37140-253 253 253 253 253 253 253 253 253 253 253 253
37141-253 253 253 253 253 253 253 253 253 253 253 253
37142-253 253 253 253 253 253 253 253 253 253 253 253
37143-253 253 253 246 246 246 218 218 218 202 202 202
37144-210 210 210 14 14 14 2 2 6 2 2 6
37145- 30 30 30 22 22 22 2 2 6 2 2 6
37146- 2 2 6 2 2 6 18 18 18 86 86 86
37147- 42 42 42 14 14 14 0 0 0 0 0 0
37148- 0 0 0 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 0 0 0
37153- 0 0 0 0 0 0 0 0 0 0 0 0
37154- 0 0 0 0 0 0 0 0 0 0 0 0
37155- 0 0 0 0 0 0 0 0 0 14 14 14
37156- 42 42 42 90 90 90 22 22 22 2 2 6
37157- 42 42 42 2 2 6 18 18 18 218 218 218
37158-253 253 253 253 253 253 253 253 253 253 253 253
37159-253 253 253 253 253 253 253 253 253 253 253 253
37160-253 253 253 253 253 253 253 253 253 253 253 253
37161-253 253 253 253 253 253 253 253 253 253 253 253
37162-253 253 253 253 253 253 253 253 253 253 253 253
37163-253 253 253 253 253 253 250 250 250 221 221 221
37164-218 218 218 101 101 101 2 2 6 14 14 14
37165- 18 18 18 38 38 38 10 10 10 2 2 6
37166- 2 2 6 2 2 6 2 2 6 78 78 78
37167- 58 58 58 22 22 22 6 6 6 0 0 0
37168- 0 0 0 0 0 0 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 0 0 0
37173- 0 0 0 0 0 0 0 0 0 0 0 0
37174- 0 0 0 0 0 0 0 0 0 0 0 0
37175- 0 0 0 0 0 0 6 6 6 18 18 18
37176- 54 54 54 82 82 82 2 2 6 26 26 26
37177- 22 22 22 2 2 6 123 123 123 253 253 253
37178-253 253 253 253 253 253 253 253 253 253 253 253
37179-253 253 253 253 253 253 253 253 253 253 253 253
37180-253 253 253 253 253 253 253 253 253 253 253 253
37181-253 253 253 253 253 253 253 253 253 253 253 253
37182-253 253 253 253 253 253 253 253 253 253 253 253
37183-253 253 253 253 253 253 253 253 253 250 250 250
37184-238 238 238 198 198 198 6 6 6 38 38 38
37185- 58 58 58 26 26 26 38 38 38 2 2 6
37186- 2 2 6 2 2 6 2 2 6 46 46 46
37187- 78 78 78 30 30 30 10 10 10 0 0 0
37188- 0 0 0 0 0 0 0 0 0 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 0 0 0
37193- 0 0 0 0 0 0 0 0 0 0 0 0
37194- 0 0 0 0 0 0 0 0 0 0 0 0
37195- 0 0 0 0 0 0 10 10 10 30 30 30
37196- 74 74 74 58 58 58 2 2 6 42 42 42
37197- 2 2 6 22 22 22 231 231 231 253 253 253
37198-253 253 253 253 253 253 253 253 253 253 253 253
37199-253 253 253 253 253 253 253 253 253 250 250 250
37200-253 253 253 253 253 253 253 253 253 253 253 253
37201-253 253 253 253 253 253 253 253 253 253 253 253
37202-253 253 253 253 253 253 253 253 253 253 253 253
37203-253 253 253 253 253 253 253 253 253 253 253 253
37204-253 253 253 246 246 246 46 46 46 38 38 38
37205- 42 42 42 14 14 14 38 38 38 14 14 14
37206- 2 2 6 2 2 6 2 2 6 6 6 6
37207- 86 86 86 46 46 46 14 14 14 0 0 0
37208- 0 0 0 0 0 0 0 0 0 0 0 0
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 0 0 0
37213- 0 0 0 0 0 0 0 0 0 0 0 0
37214- 0 0 0 0 0 0 0 0 0 0 0 0
37215- 0 0 0 6 6 6 14 14 14 42 42 42
37216- 90 90 90 18 18 18 18 18 18 26 26 26
37217- 2 2 6 116 116 116 253 253 253 253 253 253
37218-253 253 253 253 253 253 253 253 253 253 253 253
37219-253 253 253 253 253 253 250 250 250 238 238 238
37220-253 253 253 253 253 253 253 253 253 253 253 253
37221-253 253 253 253 253 253 253 253 253 253 253 253
37222-253 253 253 253 253 253 253 253 253 253 253 253
37223-253 253 253 253 253 253 253 253 253 253 253 253
37224-253 253 253 253 253 253 94 94 94 6 6 6
37225- 2 2 6 2 2 6 10 10 10 34 34 34
37226- 2 2 6 2 2 6 2 2 6 2 2 6
37227- 74 74 74 58 58 58 22 22 22 6 6 6
37228- 0 0 0 0 0 0 0 0 0 0 0 0
37229- 0 0 0 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 0 0 0 0 0 0
37233- 0 0 0 0 0 0 0 0 0 0 0 0
37234- 0 0 0 0 0 0 0 0 0 0 0 0
37235- 0 0 0 10 10 10 26 26 26 66 66 66
37236- 82 82 82 2 2 6 38 38 38 6 6 6
37237- 14 14 14 210 210 210 253 253 253 253 253 253
37238-253 253 253 253 253 253 253 253 253 253 253 253
37239-253 253 253 253 253 253 246 246 246 242 242 242
37240-253 253 253 253 253 253 253 253 253 253 253 253
37241-253 253 253 253 253 253 253 253 253 253 253 253
37242-253 253 253 253 253 253 253 253 253 253 253 253
37243-253 253 253 253 253 253 253 253 253 253 253 253
37244-253 253 253 253 253 253 144 144 144 2 2 6
37245- 2 2 6 2 2 6 2 2 6 46 46 46
37246- 2 2 6 2 2 6 2 2 6 2 2 6
37247- 42 42 42 74 74 74 30 30 30 10 10 10
37248- 0 0 0 0 0 0 0 0 0 0 0 0
37249- 0 0 0 0 0 0 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 0 0 0 0 0 0 0 0 0
37253- 0 0 0 0 0 0 0 0 0 0 0 0
37254- 0 0 0 0 0 0 0 0 0 0 0 0
37255- 6 6 6 14 14 14 42 42 42 90 90 90
37256- 26 26 26 6 6 6 42 42 42 2 2 6
37257- 74 74 74 250 250 250 253 253 253 253 253 253
37258-253 253 253 253 253 253 253 253 253 253 253 253
37259-253 253 253 253 253 253 242 242 242 242 242 242
37260-253 253 253 253 253 253 253 253 253 253 253 253
37261-253 253 253 253 253 253 253 253 253 253 253 253
37262-253 253 253 253 253 253 253 253 253 253 253 253
37263-253 253 253 253 253 253 253 253 253 253 253 253
37264-253 253 253 253 253 253 182 182 182 2 2 6
37265- 2 2 6 2 2 6 2 2 6 46 46 46
37266- 2 2 6 2 2 6 2 2 6 2 2 6
37267- 10 10 10 86 86 86 38 38 38 10 10 10
37268- 0 0 0 0 0 0 0 0 0 0 0 0
37269- 0 0 0 0 0 0 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 0 0 0 0 0 0 0 0 0 0 0 0
37273- 0 0 0 0 0 0 0 0 0 0 0 0
37274- 0 0 0 0 0 0 0 0 0 0 0 0
37275- 10 10 10 26 26 26 66 66 66 82 82 82
37276- 2 2 6 22 22 22 18 18 18 2 2 6
37277-149 149 149 253 253 253 253 253 253 253 253 253
37278-253 253 253 253 253 253 253 253 253 253 253 253
37279-253 253 253 253 253 253 234 234 234 242 242 242
37280-253 253 253 253 253 253 253 253 253 253 253 253
37281-253 253 253 253 253 253 253 253 253 253 253 253
37282-253 253 253 253 253 253 253 253 253 253 253 253
37283-253 253 253 253 253 253 253 253 253 253 253 253
37284-253 253 253 253 253 253 206 206 206 2 2 6
37285- 2 2 6 2 2 6 2 2 6 38 38 38
37286- 2 2 6 2 2 6 2 2 6 2 2 6
37287- 6 6 6 86 86 86 46 46 46 14 14 14
37288- 0 0 0 0 0 0 0 0 0 0 0 0
37289- 0 0 0 0 0 0 0 0 0 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 0 0 0 0 0 0 0 0 0 0 0 0
37293- 0 0 0 0 0 0 0 0 0 0 0 0
37294- 0 0 0 0 0 0 0 0 0 6 6 6
37295- 18 18 18 46 46 46 86 86 86 18 18 18
37296- 2 2 6 34 34 34 10 10 10 6 6 6
37297-210 210 210 253 253 253 253 253 253 253 253 253
37298-253 253 253 253 253 253 253 253 253 253 253 253
37299-253 253 253 253 253 253 234 234 234 242 242 242
37300-253 253 253 253 253 253 253 253 253 253 253 253
37301-253 253 253 253 253 253 253 253 253 253 253 253
37302-253 253 253 253 253 253 253 253 253 253 253 253
37303-253 253 253 253 253 253 253 253 253 253 253 253
37304-253 253 253 253 253 253 221 221 221 6 6 6
37305- 2 2 6 2 2 6 6 6 6 30 30 30
37306- 2 2 6 2 2 6 2 2 6 2 2 6
37307- 2 2 6 82 82 82 54 54 54 18 18 18
37308- 6 6 6 0 0 0 0 0 0 0 0 0
37309- 0 0 0 0 0 0 0 0 0 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 0 0 0 0 0 0 0 0 0 0 0 0
37313- 0 0 0 0 0 0 0 0 0 0 0 0
37314- 0 0 0 0 0 0 0 0 0 10 10 10
37315- 26 26 26 66 66 66 62 62 62 2 2 6
37316- 2 2 6 38 38 38 10 10 10 26 26 26
37317-238 238 238 253 253 253 253 253 253 253 253 253
37318-253 253 253 253 253 253 253 253 253 253 253 253
37319-253 253 253 253 253 253 231 231 231 238 238 238
37320-253 253 253 253 253 253 253 253 253 253 253 253
37321-253 253 253 253 253 253 253 253 253 253 253 253
37322-253 253 253 253 253 253 253 253 253 253 253 253
37323-253 253 253 253 253 253 253 253 253 253 253 253
37324-253 253 253 253 253 253 231 231 231 6 6 6
37325- 2 2 6 2 2 6 10 10 10 30 30 30
37326- 2 2 6 2 2 6 2 2 6 2 2 6
37327- 2 2 6 66 66 66 58 58 58 22 22 22
37328- 6 6 6 0 0 0 0 0 0 0 0 0
37329- 0 0 0 0 0 0 0 0 0 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 0 0 0
37332- 0 0 0 0 0 0 0 0 0 0 0 0
37333- 0 0 0 0 0 0 0 0 0 0 0 0
37334- 0 0 0 0 0 0 0 0 0 10 10 10
37335- 38 38 38 78 78 78 6 6 6 2 2 6
37336- 2 2 6 46 46 46 14 14 14 42 42 42
37337-246 246 246 253 253 253 253 253 253 253 253 253
37338-253 253 253 253 253 253 253 253 253 253 253 253
37339-253 253 253 253 253 253 231 231 231 242 242 242
37340-253 253 253 253 253 253 253 253 253 253 253 253
37341-253 253 253 253 253 253 253 253 253 253 253 253
37342-253 253 253 253 253 253 253 253 253 253 253 253
37343-253 253 253 253 253 253 253 253 253 253 253 253
37344-253 253 253 253 253 253 234 234 234 10 10 10
37345- 2 2 6 2 2 6 22 22 22 14 14 14
37346- 2 2 6 2 2 6 2 2 6 2 2 6
37347- 2 2 6 66 66 66 62 62 62 22 22 22
37348- 6 6 6 0 0 0 0 0 0 0 0 0
37349- 0 0 0 0 0 0 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 0 0 0
37352- 0 0 0 0 0 0 0 0 0 0 0 0
37353- 0 0 0 0 0 0 0 0 0 0 0 0
37354- 0 0 0 0 0 0 6 6 6 18 18 18
37355- 50 50 50 74 74 74 2 2 6 2 2 6
37356- 14 14 14 70 70 70 34 34 34 62 62 62
37357-250 250 250 253 253 253 253 253 253 253 253 253
37358-253 253 253 253 253 253 253 253 253 253 253 253
37359-253 253 253 253 253 253 231 231 231 246 246 246
37360-253 253 253 253 253 253 253 253 253 253 253 253
37361-253 253 253 253 253 253 253 253 253 253 253 253
37362-253 253 253 253 253 253 253 253 253 253 253 253
37363-253 253 253 253 253 253 253 253 253 253 253 253
37364-253 253 253 253 253 253 234 234 234 14 14 14
37365- 2 2 6 2 2 6 30 30 30 2 2 6
37366- 2 2 6 2 2 6 2 2 6 2 2 6
37367- 2 2 6 66 66 66 62 62 62 22 22 22
37368- 6 6 6 0 0 0 0 0 0 0 0 0
37369- 0 0 0 0 0 0 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 0 0 0
37372- 0 0 0 0 0 0 0 0 0 0 0 0
37373- 0 0 0 0 0 0 0 0 0 0 0 0
37374- 0 0 0 0 0 0 6 6 6 18 18 18
37375- 54 54 54 62 62 62 2 2 6 2 2 6
37376- 2 2 6 30 30 30 46 46 46 70 70 70
37377-250 250 250 253 253 253 253 253 253 253 253 253
37378-253 253 253 253 253 253 253 253 253 253 253 253
37379-253 253 253 253 253 253 231 231 231 246 246 246
37380-253 253 253 253 253 253 253 253 253 253 253 253
37381-253 253 253 253 253 253 253 253 253 253 253 253
37382-253 253 253 253 253 253 253 253 253 253 253 253
37383-253 253 253 253 253 253 253 253 253 253 253 253
37384-253 253 253 253 253 253 226 226 226 10 10 10
37385- 2 2 6 6 6 6 30 30 30 2 2 6
37386- 2 2 6 2 2 6 2 2 6 2 2 6
37387- 2 2 6 66 66 66 58 58 58 22 22 22
37388- 6 6 6 0 0 0 0 0 0 0 0 0
37389- 0 0 0 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 0 0 0 0 0 0
37392- 0 0 0 0 0 0 0 0 0 0 0 0
37393- 0 0 0 0 0 0 0 0 0 0 0 0
37394- 0 0 0 0 0 0 6 6 6 22 22 22
37395- 58 58 58 62 62 62 2 2 6 2 2 6
37396- 2 2 6 2 2 6 30 30 30 78 78 78
37397-250 250 250 253 253 253 253 253 253 253 253 253
37398-253 253 253 253 253 253 253 253 253 253 253 253
37399-253 253 253 253 253 253 231 231 231 246 246 246
37400-253 253 253 253 253 253 253 253 253 253 253 253
37401-253 253 253 253 253 253 253 253 253 253 253 253
37402-253 253 253 253 253 253 253 253 253 253 253 253
37403-253 253 253 253 253 253 253 253 253 253 253 253
37404-253 253 253 253 253 253 206 206 206 2 2 6
37405- 22 22 22 34 34 34 18 14 6 22 22 22
37406- 26 26 26 18 18 18 6 6 6 2 2 6
37407- 2 2 6 82 82 82 54 54 54 18 18 18
37408- 6 6 6 0 0 0 0 0 0 0 0 0
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 0 0 0 0 0 0
37412- 0 0 0 0 0 0 0 0 0 0 0 0
37413- 0 0 0 0 0 0 0 0 0 0 0 0
37414- 0 0 0 0 0 0 6 6 6 26 26 26
37415- 62 62 62 106 106 106 74 54 14 185 133 11
37416-210 162 10 121 92 8 6 6 6 62 62 62
37417-238 238 238 253 253 253 253 253 253 253 253 253
37418-253 253 253 253 253 253 253 253 253 253 253 253
37419-253 253 253 253 253 253 231 231 231 246 246 246
37420-253 253 253 253 253 253 253 253 253 253 253 253
37421-253 253 253 253 253 253 253 253 253 253 253 253
37422-253 253 253 253 253 253 253 253 253 253 253 253
37423-253 253 253 253 253 253 253 253 253 253 253 253
37424-253 253 253 253 253 253 158 158 158 18 18 18
37425- 14 14 14 2 2 6 2 2 6 2 2 6
37426- 6 6 6 18 18 18 66 66 66 38 38 38
37427- 6 6 6 94 94 94 50 50 50 18 18 18
37428- 6 6 6 0 0 0 0 0 0 0 0 0
37429- 0 0 0 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 0 0 0 0 0 0 0 0 0
37432- 0 0 0 0 0 0 0 0 0 0 0 0
37433- 0 0 0 0 0 0 0 0 0 6 6 6
37434- 10 10 10 10 10 10 18 18 18 38 38 38
37435- 78 78 78 142 134 106 216 158 10 242 186 14
37436-246 190 14 246 190 14 156 118 10 10 10 10
37437- 90 90 90 238 238 238 253 253 253 253 253 253
37438-253 253 253 253 253 253 253 253 253 253 253 253
37439-253 253 253 253 253 253 231 231 231 250 250 250
37440-253 253 253 253 253 253 253 253 253 253 253 253
37441-253 253 253 253 253 253 253 253 253 253 253 253
37442-253 253 253 253 253 253 253 253 253 253 253 253
37443-253 253 253 253 253 253 253 253 253 246 230 190
37444-238 204 91 238 204 91 181 142 44 37 26 9
37445- 2 2 6 2 2 6 2 2 6 2 2 6
37446- 2 2 6 2 2 6 38 38 38 46 46 46
37447- 26 26 26 106 106 106 54 54 54 18 18 18
37448- 6 6 6 0 0 0 0 0 0 0 0 0
37449- 0 0 0 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 0 0 0 0 0 0 0 0 0
37452- 0 0 0 0 0 0 0 0 0 0 0 0
37453- 0 0 0 6 6 6 14 14 14 22 22 22
37454- 30 30 30 38 38 38 50 50 50 70 70 70
37455-106 106 106 190 142 34 226 170 11 242 186 14
37456-246 190 14 246 190 14 246 190 14 154 114 10
37457- 6 6 6 74 74 74 226 226 226 253 253 253
37458-253 253 253 253 253 253 253 253 253 253 253 253
37459-253 253 253 253 253 253 231 231 231 250 250 250
37460-253 253 253 253 253 253 253 253 253 253 253 253
37461-253 253 253 253 253 253 253 253 253 253 253 253
37462-253 253 253 253 253 253 253 253 253 253 253 253
37463-253 253 253 253 253 253 253 253 253 228 184 62
37464-241 196 14 241 208 19 232 195 16 38 30 10
37465- 2 2 6 2 2 6 2 2 6 2 2 6
37466- 2 2 6 6 6 6 30 30 30 26 26 26
37467-203 166 17 154 142 90 66 66 66 26 26 26
37468- 6 6 6 0 0 0 0 0 0 0 0 0
37469- 0 0 0 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 0 0 0 0 0 0 0 0 0 0 0 0
37472- 0 0 0 0 0 0 0 0 0 0 0 0
37473- 6 6 6 18 18 18 38 38 38 58 58 58
37474- 78 78 78 86 86 86 101 101 101 123 123 123
37475-175 146 61 210 150 10 234 174 13 246 186 14
37476-246 190 14 246 190 14 246 190 14 238 190 10
37477-102 78 10 2 2 6 46 46 46 198 198 198
37478-253 253 253 253 253 253 253 253 253 253 253 253
37479-253 253 253 253 253 253 234 234 234 242 242 242
37480-253 253 253 253 253 253 253 253 253 253 253 253
37481-253 253 253 253 253 253 253 253 253 253 253 253
37482-253 253 253 253 253 253 253 253 253 253 253 253
37483-253 253 253 253 253 253 253 253 253 224 178 62
37484-242 186 14 241 196 14 210 166 10 22 18 6
37485- 2 2 6 2 2 6 2 2 6 2 2 6
37486- 2 2 6 2 2 6 6 6 6 121 92 8
37487-238 202 15 232 195 16 82 82 82 34 34 34
37488- 10 10 10 0 0 0 0 0 0 0 0 0
37489- 0 0 0 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 0 0 0 0 0 0 0 0 0 0 0 0
37492- 0 0 0 0 0 0 0 0 0 0 0 0
37493- 14 14 14 38 38 38 70 70 70 154 122 46
37494-190 142 34 200 144 11 197 138 11 197 138 11
37495-213 154 11 226 170 11 242 186 14 246 190 14
37496-246 190 14 246 190 14 246 190 14 246 190 14
37497-225 175 15 46 32 6 2 2 6 22 22 22
37498-158 158 158 250 250 250 253 253 253 253 253 253
37499-253 253 253 253 253 253 253 253 253 253 253 253
37500-253 253 253 253 253 253 253 253 253 253 253 253
37501-253 253 253 253 253 253 253 253 253 253 253 253
37502-253 253 253 253 253 253 253 253 253 253 253 253
37503-253 253 253 250 250 250 242 242 242 224 178 62
37504-239 182 13 236 186 11 213 154 11 46 32 6
37505- 2 2 6 2 2 6 2 2 6 2 2 6
37506- 2 2 6 2 2 6 61 42 6 225 175 15
37507-238 190 10 236 186 11 112 100 78 42 42 42
37508- 14 14 14 0 0 0 0 0 0 0 0 0
37509- 0 0 0 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 0 0 0
37511- 0 0 0 0 0 0 0 0 0 0 0 0
37512- 0 0 0 0 0 0 0 0 0 6 6 6
37513- 22 22 22 54 54 54 154 122 46 213 154 11
37514-226 170 11 230 174 11 226 170 11 226 170 11
37515-236 178 12 242 186 14 246 190 14 246 190 14
37516-246 190 14 246 190 14 246 190 14 246 190 14
37517-241 196 14 184 144 12 10 10 10 2 2 6
37518- 6 6 6 116 116 116 242 242 242 253 253 253
37519-253 253 253 253 253 253 253 253 253 253 253 253
37520-253 253 253 253 253 253 253 253 253 253 253 253
37521-253 253 253 253 253 253 253 253 253 253 253 253
37522-253 253 253 253 253 253 253 253 253 253 253 253
37523-253 253 253 231 231 231 198 198 198 214 170 54
37524-236 178 12 236 178 12 210 150 10 137 92 6
37525- 18 14 6 2 2 6 2 2 6 2 2 6
37526- 6 6 6 70 47 6 200 144 11 236 178 12
37527-239 182 13 239 182 13 124 112 88 58 58 58
37528- 22 22 22 6 6 6 0 0 0 0 0 0
37529- 0 0 0 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 0 0 0
37531- 0 0 0 0 0 0 0 0 0 0 0 0
37532- 0 0 0 0 0 0 0 0 0 10 10 10
37533- 30 30 30 70 70 70 180 133 36 226 170 11
37534-239 182 13 242 186 14 242 186 14 246 186 14
37535-246 190 14 246 190 14 246 190 14 246 190 14
37536-246 190 14 246 190 14 246 190 14 246 190 14
37537-246 190 14 232 195 16 98 70 6 2 2 6
37538- 2 2 6 2 2 6 66 66 66 221 221 221
37539-253 253 253 253 253 253 253 253 253 253 253 253
37540-253 253 253 253 253 253 253 253 253 253 253 253
37541-253 253 253 253 253 253 253 253 253 253 253 253
37542-253 253 253 253 253 253 253 253 253 253 253 253
37543-253 253 253 206 206 206 198 198 198 214 166 58
37544-230 174 11 230 174 11 216 158 10 192 133 9
37545-163 110 8 116 81 8 102 78 10 116 81 8
37546-167 114 7 197 138 11 226 170 11 239 182 13
37547-242 186 14 242 186 14 162 146 94 78 78 78
37548- 34 34 34 14 14 14 6 6 6 0 0 0
37549- 0 0 0 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 0 0 0
37551- 0 0 0 0 0 0 0 0 0 0 0 0
37552- 0 0 0 0 0 0 0 0 0 6 6 6
37553- 30 30 30 78 78 78 190 142 34 226 170 11
37554-239 182 13 246 190 14 246 190 14 246 190 14
37555-246 190 14 246 190 14 246 190 14 246 190 14
37556-246 190 14 246 190 14 246 190 14 246 190 14
37557-246 190 14 241 196 14 203 166 17 22 18 6
37558- 2 2 6 2 2 6 2 2 6 38 38 38
37559-218 218 218 253 253 253 253 253 253 253 253 253
37560-253 253 253 253 253 253 253 253 253 253 253 253
37561-253 253 253 253 253 253 253 253 253 253 253 253
37562-253 253 253 253 253 253 253 253 253 253 253 253
37563-250 250 250 206 206 206 198 198 198 202 162 69
37564-226 170 11 236 178 12 224 166 10 210 150 10
37565-200 144 11 197 138 11 192 133 9 197 138 11
37566-210 150 10 226 170 11 242 186 14 246 190 14
37567-246 190 14 246 186 14 225 175 15 124 112 88
37568- 62 62 62 30 30 30 14 14 14 6 6 6
37569- 0 0 0 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 0 0 0 0 0 0
37571- 0 0 0 0 0 0 0 0 0 0 0 0
37572- 0 0 0 0 0 0 0 0 0 10 10 10
37573- 30 30 30 78 78 78 174 135 50 224 166 10
37574-239 182 13 246 190 14 246 190 14 246 190 14
37575-246 190 14 246 190 14 246 190 14 246 190 14
37576-246 190 14 246 190 14 246 190 14 246 190 14
37577-246 190 14 246 190 14 241 196 14 139 102 15
37578- 2 2 6 2 2 6 2 2 6 2 2 6
37579- 78 78 78 250 250 250 253 253 253 253 253 253
37580-253 253 253 253 253 253 253 253 253 253 253 253
37581-253 253 253 253 253 253 253 253 253 253 253 253
37582-253 253 253 253 253 253 253 253 253 253 253 253
37583-250 250 250 214 214 214 198 198 198 190 150 46
37584-219 162 10 236 178 12 234 174 13 224 166 10
37585-216 158 10 213 154 11 213 154 11 216 158 10
37586-226 170 11 239 182 13 246 190 14 246 190 14
37587-246 190 14 246 190 14 242 186 14 206 162 42
37588-101 101 101 58 58 58 30 30 30 14 14 14
37589- 6 6 6 0 0 0 0 0 0 0 0 0
37590- 0 0 0 0 0 0 0 0 0 0 0 0
37591- 0 0 0 0 0 0 0 0 0 0 0 0
37592- 0 0 0 0 0 0 0 0 0 10 10 10
37593- 30 30 30 74 74 74 174 135 50 216 158 10
37594-236 178 12 246 190 14 246 190 14 246 190 14
37595-246 190 14 246 190 14 246 190 14 246 190 14
37596-246 190 14 246 190 14 246 190 14 246 190 14
37597-246 190 14 246 190 14 241 196 14 226 184 13
37598- 61 42 6 2 2 6 2 2 6 2 2 6
37599- 22 22 22 238 238 238 253 253 253 253 253 253
37600-253 253 253 253 253 253 253 253 253 253 253 253
37601-253 253 253 253 253 253 253 253 253 253 253 253
37602-253 253 253 253 253 253 253 253 253 253 253 253
37603-253 253 253 226 226 226 187 187 187 180 133 36
37604-216 158 10 236 178 12 239 182 13 236 178 12
37605-230 174 11 226 170 11 226 170 11 230 174 11
37606-236 178 12 242 186 14 246 190 14 246 190 14
37607-246 190 14 246 190 14 246 186 14 239 182 13
37608-206 162 42 106 106 106 66 66 66 34 34 34
37609- 14 14 14 6 6 6 0 0 0 0 0 0
37610- 0 0 0 0 0 0 0 0 0 0 0 0
37611- 0 0 0 0 0 0 0 0 0 0 0 0
37612- 0 0 0 0 0 0 0 0 0 6 6 6
37613- 26 26 26 70 70 70 163 133 67 213 154 11
37614-236 178 12 246 190 14 246 190 14 246 190 14
37615-246 190 14 246 190 14 246 190 14 246 190 14
37616-246 190 14 246 190 14 246 190 14 246 190 14
37617-246 190 14 246 190 14 246 190 14 241 196 14
37618-190 146 13 18 14 6 2 2 6 2 2 6
37619- 46 46 46 246 246 246 253 253 253 253 253 253
37620-253 253 253 253 253 253 253 253 253 253 253 253
37621-253 253 253 253 253 253 253 253 253 253 253 253
37622-253 253 253 253 253 253 253 253 253 253 253 253
37623-253 253 253 221 221 221 86 86 86 156 107 11
37624-216 158 10 236 178 12 242 186 14 246 186 14
37625-242 186 14 239 182 13 239 182 13 242 186 14
37626-242 186 14 246 186 14 246 190 14 246 190 14
37627-246 190 14 246 190 14 246 190 14 246 190 14
37628-242 186 14 225 175 15 142 122 72 66 66 66
37629- 30 30 30 10 10 10 0 0 0 0 0 0
37630- 0 0 0 0 0 0 0 0 0 0 0 0
37631- 0 0 0 0 0 0 0 0 0 0 0 0
37632- 0 0 0 0 0 0 0 0 0 6 6 6
37633- 26 26 26 70 70 70 163 133 67 210 150 10
37634-236 178 12 246 190 14 246 190 14 246 190 14
37635-246 190 14 246 190 14 246 190 14 246 190 14
37636-246 190 14 246 190 14 246 190 14 246 190 14
37637-246 190 14 246 190 14 246 190 14 246 190 14
37638-232 195 16 121 92 8 34 34 34 106 106 106
37639-221 221 221 253 253 253 253 253 253 253 253 253
37640-253 253 253 253 253 253 253 253 253 253 253 253
37641-253 253 253 253 253 253 253 253 253 253 253 253
37642-253 253 253 253 253 253 253 253 253 253 253 253
37643-242 242 242 82 82 82 18 14 6 163 110 8
37644-216 158 10 236 178 12 242 186 14 246 190 14
37645-246 190 14 246 190 14 246 190 14 246 190 14
37646-246 190 14 246 190 14 246 190 14 246 190 14
37647-246 190 14 246 190 14 246 190 14 246 190 14
37648-246 190 14 246 190 14 242 186 14 163 133 67
37649- 46 46 46 18 18 18 6 6 6 0 0 0
37650- 0 0 0 0 0 0 0 0 0 0 0 0
37651- 0 0 0 0 0 0 0 0 0 0 0 0
37652- 0 0 0 0 0 0 0 0 0 10 10 10
37653- 30 30 30 78 78 78 163 133 67 210 150 10
37654-236 178 12 246 186 14 246 190 14 246 190 14
37655-246 190 14 246 190 14 246 190 14 246 190 14
37656-246 190 14 246 190 14 246 190 14 246 190 14
37657-246 190 14 246 190 14 246 190 14 246 190 14
37658-241 196 14 215 174 15 190 178 144 253 253 253
37659-253 253 253 253 253 253 253 253 253 253 253 253
37660-253 253 253 253 253 253 253 253 253 253 253 253
37661-253 253 253 253 253 253 253 253 253 253 253 253
37662-253 253 253 253 253 253 253 253 253 218 218 218
37663- 58 58 58 2 2 6 22 18 6 167 114 7
37664-216 158 10 236 178 12 246 186 14 246 190 14
37665-246 190 14 246 190 14 246 190 14 246 190 14
37666-246 190 14 246 190 14 246 190 14 246 190 14
37667-246 190 14 246 190 14 246 190 14 246 190 14
37668-246 190 14 246 186 14 242 186 14 190 150 46
37669- 54 54 54 22 22 22 6 6 6 0 0 0
37670- 0 0 0 0 0 0 0 0 0 0 0 0
37671- 0 0 0 0 0 0 0 0 0 0 0 0
37672- 0 0 0 0 0 0 0 0 0 14 14 14
37673- 38 38 38 86 86 86 180 133 36 213 154 11
37674-236 178 12 246 186 14 246 190 14 246 190 14
37675-246 190 14 246 190 14 246 190 14 246 190 14
37676-246 190 14 246 190 14 246 190 14 246 190 14
37677-246 190 14 246 190 14 246 190 14 246 190 14
37678-246 190 14 232 195 16 190 146 13 214 214 214
37679-253 253 253 253 253 253 253 253 253 253 253 253
37680-253 253 253 253 253 253 253 253 253 253 253 253
37681-253 253 253 253 253 253 253 253 253 253 253 253
37682-253 253 253 250 250 250 170 170 170 26 26 26
37683- 2 2 6 2 2 6 37 26 9 163 110 8
37684-219 162 10 239 182 13 246 186 14 246 190 14
37685-246 190 14 246 190 14 246 190 14 246 190 14
37686-246 190 14 246 190 14 246 190 14 246 190 14
37687-246 190 14 246 190 14 246 190 14 246 190 14
37688-246 186 14 236 178 12 224 166 10 142 122 72
37689- 46 46 46 18 18 18 6 6 6 0 0 0
37690- 0 0 0 0 0 0 0 0 0 0 0 0
37691- 0 0 0 0 0 0 0 0 0 0 0 0
37692- 0 0 0 0 0 0 6 6 6 18 18 18
37693- 50 50 50 109 106 95 192 133 9 224 166 10
37694-242 186 14 246 190 14 246 190 14 246 190 14
37695-246 190 14 246 190 14 246 190 14 246 190 14
37696-246 190 14 246 190 14 246 190 14 246 190 14
37697-246 190 14 246 190 14 246 190 14 246 190 14
37698-242 186 14 226 184 13 210 162 10 142 110 46
37699-226 226 226 253 253 253 253 253 253 253 253 253
37700-253 253 253 253 253 253 253 253 253 253 253 253
37701-253 253 253 253 253 253 253 253 253 253 253 253
37702-198 198 198 66 66 66 2 2 6 2 2 6
37703- 2 2 6 2 2 6 50 34 6 156 107 11
37704-219 162 10 239 182 13 246 186 14 246 190 14
37705-246 190 14 246 190 14 246 190 14 246 190 14
37706-246 190 14 246 190 14 246 190 14 246 190 14
37707-246 190 14 246 190 14 246 190 14 242 186 14
37708-234 174 13 213 154 11 154 122 46 66 66 66
37709- 30 30 30 10 10 10 0 0 0 0 0 0
37710- 0 0 0 0 0 0 0 0 0 0 0 0
37711- 0 0 0 0 0 0 0 0 0 0 0 0
37712- 0 0 0 0 0 0 6 6 6 22 22 22
37713- 58 58 58 154 121 60 206 145 10 234 174 13
37714-242 186 14 246 186 14 246 190 14 246 190 14
37715-246 190 14 246 190 14 246 190 14 246 190 14
37716-246 190 14 246 190 14 246 190 14 246 190 14
37717-246 190 14 246 190 14 246 190 14 246 190 14
37718-246 186 14 236 178 12 210 162 10 163 110 8
37719- 61 42 6 138 138 138 218 218 218 250 250 250
37720-253 253 253 253 253 253 253 253 253 250 250 250
37721-242 242 242 210 210 210 144 144 144 66 66 66
37722- 6 6 6 2 2 6 2 2 6 2 2 6
37723- 2 2 6 2 2 6 61 42 6 163 110 8
37724-216 158 10 236 178 12 246 190 14 246 190 14
37725-246 190 14 246 190 14 246 190 14 246 190 14
37726-246 190 14 246 190 14 246 190 14 246 190 14
37727-246 190 14 239 182 13 230 174 11 216 158 10
37728-190 142 34 124 112 88 70 70 70 38 38 38
37729- 18 18 18 6 6 6 0 0 0 0 0 0
37730- 0 0 0 0 0 0 0 0 0 0 0 0
37731- 0 0 0 0 0 0 0 0 0 0 0 0
37732- 0 0 0 0 0 0 6 6 6 22 22 22
37733- 62 62 62 168 124 44 206 145 10 224 166 10
37734-236 178 12 239 182 13 242 186 14 242 186 14
37735-246 186 14 246 190 14 246 190 14 246 190 14
37736-246 190 14 246 190 14 246 190 14 246 190 14
37737-246 190 14 246 190 14 246 190 14 246 190 14
37738-246 190 14 236 178 12 216 158 10 175 118 6
37739- 80 54 7 2 2 6 6 6 6 30 30 30
37740- 54 54 54 62 62 62 50 50 50 38 38 38
37741- 14 14 14 2 2 6 2 2 6 2 2 6
37742- 2 2 6 2 2 6 2 2 6 2 2 6
37743- 2 2 6 6 6 6 80 54 7 167 114 7
37744-213 154 11 236 178 12 246 190 14 246 190 14
37745-246 190 14 246 190 14 246 190 14 246 190 14
37746-246 190 14 242 186 14 239 182 13 239 182 13
37747-230 174 11 210 150 10 174 135 50 124 112 88
37748- 82 82 82 54 54 54 34 34 34 18 18 18
37749- 6 6 6 0 0 0 0 0 0 0 0 0
37750- 0 0 0 0 0 0 0 0 0 0 0 0
37751- 0 0 0 0 0 0 0 0 0 0 0 0
37752- 0 0 0 0 0 0 6 6 6 18 18 18
37753- 50 50 50 158 118 36 192 133 9 200 144 11
37754-216 158 10 219 162 10 224 166 10 226 170 11
37755-230 174 11 236 178 12 239 182 13 239 182 13
37756-242 186 14 246 186 14 246 190 14 246 190 14
37757-246 190 14 246 190 14 246 190 14 246 190 14
37758-246 186 14 230 174 11 210 150 10 163 110 8
37759-104 69 6 10 10 10 2 2 6 2 2 6
37760- 2 2 6 2 2 6 2 2 6 2 2 6
37761- 2 2 6 2 2 6 2 2 6 2 2 6
37762- 2 2 6 2 2 6 2 2 6 2 2 6
37763- 2 2 6 6 6 6 91 60 6 167 114 7
37764-206 145 10 230 174 11 242 186 14 246 190 14
37765-246 190 14 246 190 14 246 186 14 242 186 14
37766-239 182 13 230 174 11 224 166 10 213 154 11
37767-180 133 36 124 112 88 86 86 86 58 58 58
37768- 38 38 38 22 22 22 10 10 10 6 6 6
37769- 0 0 0 0 0 0 0 0 0 0 0 0
37770- 0 0 0 0 0 0 0 0 0 0 0 0
37771- 0 0 0 0 0 0 0 0 0 0 0 0
37772- 0 0 0 0 0 0 0 0 0 14 14 14
37773- 34 34 34 70 70 70 138 110 50 158 118 36
37774-167 114 7 180 123 7 192 133 9 197 138 11
37775-200 144 11 206 145 10 213 154 11 219 162 10
37776-224 166 10 230 174 11 239 182 13 242 186 14
37777-246 186 14 246 186 14 246 186 14 246 186 14
37778-239 182 13 216 158 10 185 133 11 152 99 6
37779-104 69 6 18 14 6 2 2 6 2 2 6
37780- 2 2 6 2 2 6 2 2 6 2 2 6
37781- 2 2 6 2 2 6 2 2 6 2 2 6
37782- 2 2 6 2 2 6 2 2 6 2 2 6
37783- 2 2 6 6 6 6 80 54 7 152 99 6
37784-192 133 9 219 162 10 236 178 12 239 182 13
37785-246 186 14 242 186 14 239 182 13 236 178 12
37786-224 166 10 206 145 10 192 133 9 154 121 60
37787- 94 94 94 62 62 62 42 42 42 22 22 22
37788- 14 14 14 6 6 6 0 0 0 0 0 0
37789- 0 0 0 0 0 0 0 0 0 0 0 0
37790- 0 0 0 0 0 0 0 0 0 0 0 0
37791- 0 0 0 0 0 0 0 0 0 0 0 0
37792- 0 0 0 0 0 0 0 0 0 6 6 6
37793- 18 18 18 34 34 34 58 58 58 78 78 78
37794-101 98 89 124 112 88 142 110 46 156 107 11
37795-163 110 8 167 114 7 175 118 6 180 123 7
37796-185 133 11 197 138 11 210 150 10 219 162 10
37797-226 170 11 236 178 12 236 178 12 234 174 13
37798-219 162 10 197 138 11 163 110 8 130 83 6
37799- 91 60 6 10 10 10 2 2 6 2 2 6
37800- 18 18 18 38 38 38 38 38 38 38 38 38
37801- 38 38 38 38 38 38 38 38 38 38 38 38
37802- 38 38 38 38 38 38 26 26 26 2 2 6
37803- 2 2 6 6 6 6 70 47 6 137 92 6
37804-175 118 6 200 144 11 219 162 10 230 174 11
37805-234 174 13 230 174 11 219 162 10 210 150 10
37806-192 133 9 163 110 8 124 112 88 82 82 82
37807- 50 50 50 30 30 30 14 14 14 6 6 6
37808- 0 0 0 0 0 0 0 0 0 0 0 0
37809- 0 0 0 0 0 0 0 0 0 0 0 0
37810- 0 0 0 0 0 0 0 0 0 0 0 0
37811- 0 0 0 0 0 0 0 0 0 0 0 0
37812- 0 0 0 0 0 0 0 0 0 0 0 0
37813- 6 6 6 14 14 14 22 22 22 34 34 34
37814- 42 42 42 58 58 58 74 74 74 86 86 86
37815-101 98 89 122 102 70 130 98 46 121 87 25
37816-137 92 6 152 99 6 163 110 8 180 123 7
37817-185 133 11 197 138 11 206 145 10 200 144 11
37818-180 123 7 156 107 11 130 83 6 104 69 6
37819- 50 34 6 54 54 54 110 110 110 101 98 89
37820- 86 86 86 82 82 82 78 78 78 78 78 78
37821- 78 78 78 78 78 78 78 78 78 78 78 78
37822- 78 78 78 82 82 82 86 86 86 94 94 94
37823-106 106 106 101 101 101 86 66 34 124 80 6
37824-156 107 11 180 123 7 192 133 9 200 144 11
37825-206 145 10 200 144 11 192 133 9 175 118 6
37826-139 102 15 109 106 95 70 70 70 42 42 42
37827- 22 22 22 10 10 10 0 0 0 0 0 0
37828- 0 0 0 0 0 0 0 0 0 0 0 0
37829- 0 0 0 0 0 0 0 0 0 0 0 0
37830- 0 0 0 0 0 0 0 0 0 0 0 0
37831- 0 0 0 0 0 0 0 0 0 0 0 0
37832- 0 0 0 0 0 0 0 0 0 0 0 0
37833- 0 0 0 0 0 0 6 6 6 10 10 10
37834- 14 14 14 22 22 22 30 30 30 38 38 38
37835- 50 50 50 62 62 62 74 74 74 90 90 90
37836-101 98 89 112 100 78 121 87 25 124 80 6
37837-137 92 6 152 99 6 152 99 6 152 99 6
37838-138 86 6 124 80 6 98 70 6 86 66 30
37839-101 98 89 82 82 82 58 58 58 46 46 46
37840- 38 38 38 34 34 34 34 34 34 34 34 34
37841- 34 34 34 34 34 34 34 34 34 34 34 34
37842- 34 34 34 34 34 34 38 38 38 42 42 42
37843- 54 54 54 82 82 82 94 86 76 91 60 6
37844-134 86 6 156 107 11 167 114 7 175 118 6
37845-175 118 6 167 114 7 152 99 6 121 87 25
37846-101 98 89 62 62 62 34 34 34 18 18 18
37847- 6 6 6 0 0 0 0 0 0 0 0 0
37848- 0 0 0 0 0 0 0 0 0 0 0 0
37849- 0 0 0 0 0 0 0 0 0 0 0 0
37850- 0 0 0 0 0 0 0 0 0 0 0 0
37851- 0 0 0 0 0 0 0 0 0 0 0 0
37852- 0 0 0 0 0 0 0 0 0 0 0 0
37853- 0 0 0 0 0 0 0 0 0 0 0 0
37854- 0 0 0 6 6 6 6 6 6 10 10 10
37855- 18 18 18 22 22 22 30 30 30 42 42 42
37856- 50 50 50 66 66 66 86 86 86 101 98 89
37857-106 86 58 98 70 6 104 69 6 104 69 6
37858-104 69 6 91 60 6 82 62 34 90 90 90
37859- 62 62 62 38 38 38 22 22 22 14 14 14
37860- 10 10 10 10 10 10 10 10 10 10 10 10
37861- 10 10 10 10 10 10 6 6 6 10 10 10
37862- 10 10 10 10 10 10 10 10 10 14 14 14
37863- 22 22 22 42 42 42 70 70 70 89 81 66
37864- 80 54 7 104 69 6 124 80 6 137 92 6
37865-134 86 6 116 81 8 100 82 52 86 86 86
37866- 58 58 58 30 30 30 14 14 14 6 6 6
37867- 0 0 0 0 0 0 0 0 0 0 0 0
37868- 0 0 0 0 0 0 0 0 0 0 0 0
37869- 0 0 0 0 0 0 0 0 0 0 0 0
37870- 0 0 0 0 0 0 0 0 0 0 0 0
37871- 0 0 0 0 0 0 0 0 0 0 0 0
37872- 0 0 0 0 0 0 0 0 0 0 0 0
37873- 0 0 0 0 0 0 0 0 0 0 0 0
37874- 0 0 0 0 0 0 0 0 0 0 0 0
37875- 0 0 0 6 6 6 10 10 10 14 14 14
37876- 18 18 18 26 26 26 38 38 38 54 54 54
37877- 70 70 70 86 86 86 94 86 76 89 81 66
37878- 89 81 66 86 86 86 74 74 74 50 50 50
37879- 30 30 30 14 14 14 6 6 6 0 0 0
37880- 0 0 0 0 0 0 0 0 0 0 0 0
37881- 0 0 0 0 0 0 0 0 0 0 0 0
37882- 0 0 0 0 0 0 0 0 0 0 0 0
37883- 6 6 6 18 18 18 34 34 34 58 58 58
37884- 82 82 82 89 81 66 89 81 66 89 81 66
37885- 94 86 66 94 86 76 74 74 74 50 50 50
37886- 26 26 26 14 14 14 6 6 6 0 0 0
37887- 0 0 0 0 0 0 0 0 0 0 0 0
37888- 0 0 0 0 0 0 0 0 0 0 0 0
37889- 0 0 0 0 0 0 0 0 0 0 0 0
37890- 0 0 0 0 0 0 0 0 0 0 0 0
37891- 0 0 0 0 0 0 0 0 0 0 0 0
37892- 0 0 0 0 0 0 0 0 0 0 0 0
37893- 0 0 0 0 0 0 0 0 0 0 0 0
37894- 0 0 0 0 0 0 0 0 0 0 0 0
37895- 0 0 0 0 0 0 0 0 0 0 0 0
37896- 6 6 6 6 6 6 14 14 14 18 18 18
37897- 30 30 30 38 38 38 46 46 46 54 54 54
37898- 50 50 50 42 42 42 30 30 30 18 18 18
37899- 10 10 10 0 0 0 0 0 0 0 0 0
37900- 0 0 0 0 0 0 0 0 0 0 0 0
37901- 0 0 0 0 0 0 0 0 0 0 0 0
37902- 0 0 0 0 0 0 0 0 0 0 0 0
37903- 0 0 0 6 6 6 14 14 14 26 26 26
37904- 38 38 38 50 50 50 58 58 58 58 58 58
37905- 54 54 54 42 42 42 30 30 30 18 18 18
37906- 10 10 10 0 0 0 0 0 0 0 0 0
37907- 0 0 0 0 0 0 0 0 0 0 0 0
37908- 0 0 0 0 0 0 0 0 0 0 0 0
37909- 0 0 0 0 0 0 0 0 0 0 0 0
37910- 0 0 0 0 0 0 0 0 0 0 0 0
37911- 0 0 0 0 0 0 0 0 0 0 0 0
37912- 0 0 0 0 0 0 0 0 0 0 0 0
37913- 0 0 0 0 0 0 0 0 0 0 0 0
37914- 0 0 0 0 0 0 0 0 0 0 0 0
37915- 0 0 0 0 0 0 0 0 0 0 0 0
37916- 0 0 0 0 0 0 0 0 0 6 6 6
37917- 6 6 6 10 10 10 14 14 14 18 18 18
37918- 18 18 18 14 14 14 10 10 10 6 6 6
37919- 0 0 0 0 0 0 0 0 0 0 0 0
37920- 0 0 0 0 0 0 0 0 0 0 0 0
37921- 0 0 0 0 0 0 0 0 0 0 0 0
37922- 0 0 0 0 0 0 0 0 0 0 0 0
37923- 0 0 0 0 0 0 0 0 0 6 6 6
37924- 14 14 14 18 18 18 22 22 22 22 22 22
37925- 18 18 18 14 14 14 10 10 10 6 6 6
37926- 0 0 0 0 0 0 0 0 0 0 0 0
37927- 0 0 0 0 0 0 0 0 0 0 0 0
37928- 0 0 0 0 0 0 0 0 0 0 0 0
37929- 0 0 0 0 0 0 0 0 0 0 0 0
37930- 0 0 0 0 0 0 0 0 0 0 0 0
37931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37944+4 4 4 4 4 4
37945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37958+4 4 4 4 4 4
37959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37972+4 4 4 4 4 4
37973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37986+4 4 4 4 4 4
37987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38000+4 4 4 4 4 4
38001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38014+4 4 4 4 4 4
38015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38019+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38020+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38024+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38025+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38026+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38028+4 4 4 4 4 4
38029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38033+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38034+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38035+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38038+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38039+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38040+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38041+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38042+4 4 4 4 4 4
38043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38047+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38048+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38049+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38052+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38053+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38054+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38055+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38056+4 4 4 4 4 4
38057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38061+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38062+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38063+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38065+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38066+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38067+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38068+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38069+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38070+4 4 4 4 4 4
38071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38075+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38076+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38077+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38078+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38079+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38080+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38081+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38082+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38083+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38084+4 4 4 4 4 4
38085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38088+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38089+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38090+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38091+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38092+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38093+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38094+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38095+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38096+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38097+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38098+4 4 4 4 4 4
38099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38102+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38103+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38104+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38105+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38106+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38107+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38108+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38109+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38110+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38111+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38112+4 4 4 4 4 4
38113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38116+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38117+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38118+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38119+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38120+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38121+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38122+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38123+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38124+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38125+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38126+4 4 4 4 4 4
38127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38130+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38131+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38132+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38133+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38134+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38135+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38136+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38137+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38138+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38139+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38140+4 4 4 4 4 4
38141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38144+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38145+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38146+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38147+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38148+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38149+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38150+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38151+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38152+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38153+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38154+4 4 4 4 4 4
38155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38156+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38157+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38158+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38159+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38160+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38161+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38162+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38163+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38164+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38165+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38166+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38167+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38168+4 4 4 4 4 4
38169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38171+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38172+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38173+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38174+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38175+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38176+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38177+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38178+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38179+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38180+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38181+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38182+0 0 0 4 4 4
38183+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38184+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38185+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38186+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38187+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38188+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38189+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38190+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38191+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38192+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38193+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38194+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38195+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38196+2 0 0 0 0 0
38197+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38198+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38199+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38200+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38201+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38202+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38203+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38204+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38205+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38206+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38207+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38208+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38209+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38210+37 38 37 0 0 0
38211+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38212+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38213+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38214+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38215+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38216+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38217+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38218+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38219+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38220+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38221+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38222+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38223+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38224+85 115 134 4 0 0
38225+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38226+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38227+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38228+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38229+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38230+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38231+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38232+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38233+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38234+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38235+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38236+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38237+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38238+60 73 81 4 0 0
38239+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38240+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38241+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38242+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38243+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38244+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38245+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38246+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38247+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38248+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38249+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38250+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38251+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38252+16 19 21 4 0 0
38253+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38254+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38255+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38256+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38257+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38258+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38259+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38260+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38261+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38262+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38263+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38264+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38265+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38266+4 0 0 4 3 3
38267+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38268+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38269+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38271+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38272+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38273+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38274+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38275+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38276+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38277+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38278+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38279+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38280+3 2 2 4 4 4
38281+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38282+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38283+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38284+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38285+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38286+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38287+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38288+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38289+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38290+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38291+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38292+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38293+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38294+4 4 4 4 4 4
38295+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38296+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38297+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38298+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38299+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38300+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38301+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38302+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38303+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38304+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38305+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38306+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38307+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38308+4 4 4 4 4 4
38309+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38310+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38311+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38312+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38313+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38314+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38315+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38316+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38317+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38318+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38319+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38320+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38321+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38322+5 5 5 5 5 5
38323+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38324+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38325+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38326+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38327+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38328+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38329+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38330+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38331+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38332+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38333+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38334+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38335+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38336+5 5 5 4 4 4
38337+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38338+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38339+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38340+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38341+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38342+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38343+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38344+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38345+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38346+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38347+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38348+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38350+4 4 4 4 4 4
38351+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38352+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38353+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38354+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38355+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38356+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38357+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38358+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38359+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38360+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38361+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38362+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38364+4 4 4 4 4 4
38365+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38366+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38367+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38368+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38369+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38370+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38371+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38372+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38373+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38374+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38375+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38378+4 4 4 4 4 4
38379+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38380+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38381+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38382+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38383+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38384+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38385+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38386+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38387+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38388+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38389+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38392+4 4 4 4 4 4
38393+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38394+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38395+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38396+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38397+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38398+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38399+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38400+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38401+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38402+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38403+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38406+4 4 4 4 4 4
38407+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38408+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38409+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38410+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38411+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38412+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38413+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38414+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38415+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38416+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38417+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38420+4 4 4 4 4 4
38421+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38422+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38423+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38424+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38425+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38426+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38427+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38428+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38429+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38430+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38431+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38434+4 4 4 4 4 4
38435+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38436+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38437+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38438+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38439+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38440+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38441+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38442+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38443+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38444+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38445+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38448+4 4 4 4 4 4
38449+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38450+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38451+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38452+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38453+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38454+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38455+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38456+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38457+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38458+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38459+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38462+4 4 4 4 4 4
38463+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38464+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38465+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38466+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38467+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38468+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38469+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38470+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38471+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38472+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38473+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38476+4 4 4 4 4 4
38477+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38478+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38479+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38480+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38481+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38482+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38483+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38484+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38485+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38486+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38487+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38490+4 4 4 4 4 4
38491+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38492+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38493+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38494+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38495+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38496+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38497+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38498+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38499+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38500+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38501+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38504+4 4 4 4 4 4
38505+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38506+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38507+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38508+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38509+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38510+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38511+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38512+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38513+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38514+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38515+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38518+4 4 4 4 4 4
38519+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38520+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38521+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38522+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38523+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38524+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38525+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38526+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38527+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38528+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38529+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38532+4 4 4 4 4 4
38533+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38534+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38535+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38536+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38537+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38538+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38539+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38540+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38541+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38542+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38543+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38546+4 4 4 4 4 4
38547+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38548+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38549+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38550+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38551+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38552+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38553+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38554+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38555+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38556+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38557+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38560+4 4 4 4 4 4
38561+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38562+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38563+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38564+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38565+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38566+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38567+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38568+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38569+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38570+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38571+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38574+4 4 4 4 4 4
38575+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38576+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38577+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38578+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38579+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38580+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38581+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38582+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38583+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38584+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38585+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38588+4 4 4 4 4 4
38589+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38590+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38591+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38592+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38593+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38594+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38595+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38596+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38597+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38598+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38599+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38602+4 4 4 4 4 4
38603+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38604+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38605+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38606+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38607+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38608+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38609+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38610+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38611+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38612+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38613+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38616+4 4 4 4 4 4
38617+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38618+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38619+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38620+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38621+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38622+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38623+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38624+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38625+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38626+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38627+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38630+4 4 4 4 4 4
38631+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38632+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38633+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38634+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38635+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38636+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38637+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38638+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38639+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38640+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38641+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38644+4 4 4 4 4 4
38645+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38646+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38647+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38648+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38649+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38650+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38651+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38652+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38653+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38654+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38655+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38658+4 4 4 4 4 4
38659+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38660+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38661+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38662+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38663+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38664+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38665+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38666+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38667+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38668+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38669+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38672+4 4 4 4 4 4
38673+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38674+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38675+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38676+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38677+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38678+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38679+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38680+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38681+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38682+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38683+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38686+4 4 4 4 4 4
38687+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38688+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38689+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38690+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38691+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38692+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38693+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38694+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38695+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38696+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38697+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38700+4 4 4 4 4 4
38701+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38702+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38703+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38704+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38705+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38706+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38707+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38708+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38709+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38710+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38711+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38714+4 4 4 4 4 4
38715+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38716+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38717+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38718+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38719+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38720+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38721+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38723+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38724+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38725+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38728+4 4 4 4 4 4
38729+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38730+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38731+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38732+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38733+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38734+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38735+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38736+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38737+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38738+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38739+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38742+4 4 4 4 4 4
38743+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38744+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38745+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38746+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38747+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38748+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38749+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38750+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38751+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38752+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38756+4 4 4 4 4 4
38757+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38758+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38759+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38760+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38761+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38762+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38763+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38764+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38765+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38766+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38770+4 4 4 4 4 4
38771+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38772+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38773+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38774+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38775+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38776+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38777+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38778+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38779+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38780+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38784+4 4 4 4 4 4
38785+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38786+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38787+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38788+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38789+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38790+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38791+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38792+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38793+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38794+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38798+4 4 4 4 4 4
38799+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38800+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38801+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38802+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38803+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38804+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38805+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38806+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38807+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38812+4 4 4 4 4 4
38813+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38814+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38815+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38816+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38817+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38818+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38819+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38820+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38821+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38826+4 4 4 4 4 4
38827+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38828+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38829+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38830+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38831+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38832+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38833+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38834+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38835+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38840+4 4 4 4 4 4
38841+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38842+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38843+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38844+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38845+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38846+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38847+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38848+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38854+4 4 4 4 4 4
38855+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38856+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38857+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38858+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38859+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38860+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38861+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38862+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38868+4 4 4 4 4 4
38869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38870+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38871+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38872+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38873+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38874+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38875+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38876+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38882+4 4 4 4 4 4
38883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38884+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38885+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38886+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38887+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38888+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38889+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38890+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38896+4 4 4 4 4 4
38897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38898+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38899+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38900+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38901+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38902+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38903+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38904+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38910+4 4 4 4 4 4
38911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38913+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38914+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38915+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38916+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38917+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38918+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38924+4 4 4 4 4 4
38925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38928+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38929+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
38930+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
38931+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
38932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38938+4 4 4 4 4 4
38939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38942+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38943+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38944+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
38945+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
38946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38952+4 4 4 4 4 4
38953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38956+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38957+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38958+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38959+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
38960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38966+4 4 4 4 4 4
38967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38970+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
38971+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
38972+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
38973+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
38974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38980+4 4 4 4 4 4
38981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38985+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
38986+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38987+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38994+4 4 4 4 4 4
38995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39000+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39001+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39008+4 4 4 4 4 4
39009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39014+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39015+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39022+4 4 4 4 4 4
39023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39028+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39036+4 4 4 4 4 4
39037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39042+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39050+4 4 4 4 4 4
39051diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39052index 3473e75..c930142 100644
39053--- a/drivers/video/udlfb.c
39054+++ b/drivers/video/udlfb.c
39055@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39056 dlfb_urb_completion(urb);
39057
39058 error:
39059- atomic_add(bytes_sent, &dev->bytes_sent);
39060- atomic_add(bytes_identical, &dev->bytes_identical);
39061- atomic_add(width*height*2, &dev->bytes_rendered);
39062+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39063+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39064+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39065 end_cycles = get_cycles();
39066- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39067+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39068 >> 10)), /* Kcycles */
39069 &dev->cpu_kcycles_used);
39070
39071@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39072 dlfb_urb_completion(urb);
39073
39074 error:
39075- atomic_add(bytes_sent, &dev->bytes_sent);
39076- atomic_add(bytes_identical, &dev->bytes_identical);
39077- atomic_add(bytes_rendered, &dev->bytes_rendered);
39078+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39079+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39080+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39081 end_cycles = get_cycles();
39082- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39083+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39084 >> 10)), /* Kcycles */
39085 &dev->cpu_kcycles_used);
39086 }
39087@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39088 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39089 struct dlfb_data *dev = fb_info->par;
39090 return snprintf(buf, PAGE_SIZE, "%u\n",
39091- atomic_read(&dev->bytes_rendered));
39092+ atomic_read_unchecked(&dev->bytes_rendered));
39093 }
39094
39095 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39096@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39097 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39098 struct dlfb_data *dev = fb_info->par;
39099 return snprintf(buf, PAGE_SIZE, "%u\n",
39100- atomic_read(&dev->bytes_identical));
39101+ atomic_read_unchecked(&dev->bytes_identical));
39102 }
39103
39104 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39105@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39106 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39107 struct dlfb_data *dev = fb_info->par;
39108 return snprintf(buf, PAGE_SIZE, "%u\n",
39109- atomic_read(&dev->bytes_sent));
39110+ atomic_read_unchecked(&dev->bytes_sent));
39111 }
39112
39113 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39114@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39115 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39116 struct dlfb_data *dev = fb_info->par;
39117 return snprintf(buf, PAGE_SIZE, "%u\n",
39118- atomic_read(&dev->cpu_kcycles_used));
39119+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39120 }
39121
39122 static ssize_t edid_show(
39123@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39124 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39125 struct dlfb_data *dev = fb_info->par;
39126
39127- atomic_set(&dev->bytes_rendered, 0);
39128- atomic_set(&dev->bytes_identical, 0);
39129- atomic_set(&dev->bytes_sent, 0);
39130- atomic_set(&dev->cpu_kcycles_used, 0);
39131+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39132+ atomic_set_unchecked(&dev->bytes_identical, 0);
39133+ atomic_set_unchecked(&dev->bytes_sent, 0);
39134+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39135
39136 return count;
39137 }
39138diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39139index 7f8472c..9842e87 100644
39140--- a/drivers/video/uvesafb.c
39141+++ b/drivers/video/uvesafb.c
39142@@ -19,6 +19,7 @@
39143 #include <linux/io.h>
39144 #include <linux/mutex.h>
39145 #include <linux/slab.h>
39146+#include <linux/moduleloader.h>
39147 #include <video/edid.h>
39148 #include <video/uvesafb.h>
39149 #ifdef CONFIG_X86
39150@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39151 NULL,
39152 };
39153
39154- return call_usermodehelper(v86d_path, argv, envp, 1);
39155+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39156 }
39157
39158 /*
39159@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39160 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39161 par->pmi_setpal = par->ypan = 0;
39162 } else {
39163+
39164+#ifdef CONFIG_PAX_KERNEXEC
39165+#ifdef CONFIG_MODULES
39166+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39167+#endif
39168+ if (!par->pmi_code) {
39169+ par->pmi_setpal = par->ypan = 0;
39170+ return 0;
39171+ }
39172+#endif
39173+
39174 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39175 + task->t.regs.edi);
39176+
39177+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39178+ pax_open_kernel();
39179+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39180+ pax_close_kernel();
39181+
39182+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39183+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39184+#else
39185 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39186 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39187+#endif
39188+
39189 printk(KERN_INFO "uvesafb: protected mode interface info at "
39190 "%04x:%04x\n",
39191 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39192@@ -1821,6 +1844,11 @@ out:
39193 if (par->vbe_modes)
39194 kfree(par->vbe_modes);
39195
39196+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39197+ if (par->pmi_code)
39198+ module_free_exec(NULL, par->pmi_code);
39199+#endif
39200+
39201 framebuffer_release(info);
39202 return err;
39203 }
39204@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39205 kfree(par->vbe_state_orig);
39206 if (par->vbe_state_saved)
39207 kfree(par->vbe_state_saved);
39208+
39209+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39210+ if (par->pmi_code)
39211+ module_free_exec(NULL, par->pmi_code);
39212+#endif
39213+
39214 }
39215
39216 framebuffer_release(info);
39217diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39218index 501b340..86bd4cf 100644
39219--- a/drivers/video/vesafb.c
39220+++ b/drivers/video/vesafb.c
39221@@ -9,6 +9,7 @@
39222 */
39223
39224 #include <linux/module.h>
39225+#include <linux/moduleloader.h>
39226 #include <linux/kernel.h>
39227 #include <linux/errno.h>
39228 #include <linux/string.h>
39229@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39230 static int vram_total __initdata; /* Set total amount of memory */
39231 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39232 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39233-static void (*pmi_start)(void) __read_mostly;
39234-static void (*pmi_pal) (void) __read_mostly;
39235+static void (*pmi_start)(void) __read_only;
39236+static void (*pmi_pal) (void) __read_only;
39237 static int depth __read_mostly;
39238 static int vga_compat __read_mostly;
39239 /* --------------------------------------------------------------------- */
39240@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39241 unsigned int size_vmode;
39242 unsigned int size_remap;
39243 unsigned int size_total;
39244+ void *pmi_code = NULL;
39245
39246 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39247 return -ENODEV;
39248@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39249 size_remap = size_total;
39250 vesafb_fix.smem_len = size_remap;
39251
39252-#ifndef __i386__
39253- screen_info.vesapm_seg = 0;
39254-#endif
39255-
39256 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39257 printk(KERN_WARNING
39258 "vesafb: cannot reserve video memory at 0x%lx\n",
39259@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39260 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39261 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39262
39263+#ifdef __i386__
39264+
39265+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39266+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39267+ if (!pmi_code)
39268+#elif !defined(CONFIG_PAX_KERNEXEC)
39269+ if (0)
39270+#endif
39271+
39272+#endif
39273+ screen_info.vesapm_seg = 0;
39274+
39275 if (screen_info.vesapm_seg) {
39276- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39277- screen_info.vesapm_seg,screen_info.vesapm_off);
39278+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39279+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39280 }
39281
39282 if (screen_info.vesapm_seg < 0xc000)
39283@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39284
39285 if (ypan || pmi_setpal) {
39286 unsigned short *pmi_base;
39287+
39288 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39289- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39290- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39291+
39292+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39293+ pax_open_kernel();
39294+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39295+#else
39296+ pmi_code = pmi_base;
39297+#endif
39298+
39299+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39300+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39301+
39302+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39303+ pmi_start = ktva_ktla(pmi_start);
39304+ pmi_pal = ktva_ktla(pmi_pal);
39305+ pax_close_kernel();
39306+#endif
39307+
39308 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39309 if (pmi_base[3]) {
39310 printk(KERN_INFO "vesafb: pmi: ports = ");
39311@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39312 info->node, info->fix.id);
39313 return 0;
39314 err:
39315+
39316+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39317+ module_free_exec(NULL, pmi_code);
39318+#endif
39319+
39320 if (info->screen_base)
39321 iounmap(info->screen_base);
39322 framebuffer_release(info);
39323diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39324index 88714ae..16c2e11 100644
39325--- a/drivers/video/via/via_clock.h
39326+++ b/drivers/video/via/via_clock.h
39327@@ -56,7 +56,7 @@ struct via_clock {
39328
39329 void (*set_engine_pll_state)(u8 state);
39330 void (*set_engine_pll)(struct via_pll_config config);
39331-};
39332+} __no_const;
39333
39334
39335 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39336diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39337index e56c934..fc22f4b 100644
39338--- a/drivers/xen/xen-pciback/conf_space.h
39339+++ b/drivers/xen/xen-pciback/conf_space.h
39340@@ -44,15 +44,15 @@ struct config_field {
39341 struct {
39342 conf_dword_write write;
39343 conf_dword_read read;
39344- } dw;
39345+ } __no_const dw;
39346 struct {
39347 conf_word_write write;
39348 conf_word_read read;
39349- } w;
39350+ } __no_const w;
39351 struct {
39352 conf_byte_write write;
39353 conf_byte_read read;
39354- } b;
39355+ } __no_const b;
39356 } u;
39357 struct list_head list;
39358 };
39359diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39360index 879ed88..bc03a01 100644
39361--- a/fs/9p/vfs_inode.c
39362+++ b/fs/9p/vfs_inode.c
39363@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39364 void
39365 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39366 {
39367- char *s = nd_get_link(nd);
39368+ const char *s = nd_get_link(nd);
39369
39370 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39371 IS_ERR(s) ? "<error>" : s);
39372diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39373index 79e2ca7..5828ad1 100644
39374--- a/fs/Kconfig.binfmt
39375+++ b/fs/Kconfig.binfmt
39376@@ -86,7 +86,7 @@ config HAVE_AOUT
39377
39378 config BINFMT_AOUT
39379 tristate "Kernel support for a.out and ECOFF binaries"
39380- depends on HAVE_AOUT
39381+ depends on HAVE_AOUT && BROKEN
39382 ---help---
39383 A.out (Assembler.OUTput) is a set of formats for libraries and
39384 executables used in the earliest versions of UNIX. Linux used
39385diff --git a/fs/aio.c b/fs/aio.c
39386index 969beb0..09fab51 100644
39387--- a/fs/aio.c
39388+++ b/fs/aio.c
39389@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39390 size += sizeof(struct io_event) * nr_events;
39391 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39392
39393- if (nr_pages < 0)
39394+ if (nr_pages <= 0)
39395 return -EINVAL;
39396
39397 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39398@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39399 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39400 {
39401 ssize_t ret;
39402+ struct iovec iovstack;
39403
39404 #ifdef CONFIG_COMPAT
39405 if (compat)
39406 ret = compat_rw_copy_check_uvector(type,
39407 (struct compat_iovec __user *)kiocb->ki_buf,
39408- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39409+ kiocb->ki_nbytes, 1, &iovstack,
39410 &kiocb->ki_iovec, 1);
39411 else
39412 #endif
39413 ret = rw_copy_check_uvector(type,
39414 (struct iovec __user *)kiocb->ki_buf,
39415- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39416+ kiocb->ki_nbytes, 1, &iovstack,
39417 &kiocb->ki_iovec, 1);
39418 if (ret < 0)
39419 goto out;
39420
39421+ if (kiocb->ki_iovec == &iovstack) {
39422+ kiocb->ki_inline_vec = iovstack;
39423+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39424+ }
39425 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39426 kiocb->ki_cur_seg = 0;
39427 /* ki_nbytes/left now reflect bytes instead of segs */
39428diff --git a/fs/attr.c b/fs/attr.c
39429index 7ee7ba4..0c61a60 100644
39430--- a/fs/attr.c
39431+++ b/fs/attr.c
39432@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39433 unsigned long limit;
39434
39435 limit = rlimit(RLIMIT_FSIZE);
39436+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39437 if (limit != RLIM_INFINITY && offset > limit)
39438 goto out_sig;
39439 if (offset > inode->i_sb->s_maxbytes)
39440diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39441index e1fbdee..cd5ea56 100644
39442--- a/fs/autofs4/waitq.c
39443+++ b/fs/autofs4/waitq.c
39444@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39445 {
39446 unsigned long sigpipe, flags;
39447 mm_segment_t fs;
39448- const char *data = (const char *)addr;
39449+ const char __user *data = (const char __force_user *)addr;
39450 ssize_t wr = 0;
39451
39452 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39453diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39454index 8342ca6..82fd192 100644
39455--- a/fs/befs/linuxvfs.c
39456+++ b/fs/befs/linuxvfs.c
39457@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39458 {
39459 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39460 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39461- char *link = nd_get_link(nd);
39462+ const char *link = nd_get_link(nd);
39463 if (!IS_ERR(link))
39464 kfree(link);
39465 }
39466diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39467index a6395bd..a5b24c4 100644
39468--- a/fs/binfmt_aout.c
39469+++ b/fs/binfmt_aout.c
39470@@ -16,6 +16,7 @@
39471 #include <linux/string.h>
39472 #include <linux/fs.h>
39473 #include <linux/file.h>
39474+#include <linux/security.h>
39475 #include <linux/stat.h>
39476 #include <linux/fcntl.h>
39477 #include <linux/ptrace.h>
39478@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39479 #endif
39480 # define START_STACK(u) ((void __user *)u.start_stack)
39481
39482+ memset(&dump, 0, sizeof(dump));
39483+
39484 fs = get_fs();
39485 set_fs(KERNEL_DS);
39486 has_dumped = 1;
39487@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39488
39489 /* If the size of the dump file exceeds the rlimit, then see what would happen
39490 if we wrote the stack, but not the data area. */
39491+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39492 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39493 dump.u_dsize = 0;
39494
39495 /* Make sure we have enough room to write the stack and data areas. */
39496+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39497 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39498 dump.u_ssize = 0;
39499
39500@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39501 rlim = rlimit(RLIMIT_DATA);
39502 if (rlim >= RLIM_INFINITY)
39503 rlim = ~0;
39504+
39505+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39506 if (ex.a_data + ex.a_bss > rlim)
39507 return -ENOMEM;
39508
39509@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39510 install_exec_creds(bprm);
39511 current->flags &= ~PF_FORKNOEXEC;
39512
39513+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39514+ current->mm->pax_flags = 0UL;
39515+#endif
39516+
39517+#ifdef CONFIG_PAX_PAGEEXEC
39518+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39519+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39520+
39521+#ifdef CONFIG_PAX_EMUTRAMP
39522+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39523+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39524+#endif
39525+
39526+#ifdef CONFIG_PAX_MPROTECT
39527+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39528+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39529+#endif
39530+
39531+ }
39532+#endif
39533+
39534 if (N_MAGIC(ex) == OMAGIC) {
39535 unsigned long text_addr, map_size;
39536 loff_t pos;
39537@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39538
39539 down_write(&current->mm->mmap_sem);
39540 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39541- PROT_READ | PROT_WRITE | PROT_EXEC,
39542+ PROT_READ | PROT_WRITE,
39543 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39544 fd_offset + ex.a_text);
39545 up_write(&current->mm->mmap_sem);
39546diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39547index 21ac5ee..c1090ea 100644
39548--- a/fs/binfmt_elf.c
39549+++ b/fs/binfmt_elf.c
39550@@ -32,6 +32,7 @@
39551 #include <linux/elf.h>
39552 #include <linux/utsname.h>
39553 #include <linux/coredump.h>
39554+#include <linux/xattr.h>
39555 #include <asm/uaccess.h>
39556 #include <asm/param.h>
39557 #include <asm/page.h>
39558@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39559 #define elf_core_dump NULL
39560 #endif
39561
39562+#ifdef CONFIG_PAX_MPROTECT
39563+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39564+#endif
39565+
39566 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39567 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39568 #else
39569@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39570 .load_binary = load_elf_binary,
39571 .load_shlib = load_elf_library,
39572 .core_dump = elf_core_dump,
39573+
39574+#ifdef CONFIG_PAX_MPROTECT
39575+ .handle_mprotect= elf_handle_mprotect,
39576+#endif
39577+
39578 .min_coredump = ELF_EXEC_PAGESIZE,
39579 };
39580
39581@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39582
39583 static int set_brk(unsigned long start, unsigned long end)
39584 {
39585+ unsigned long e = end;
39586+
39587 start = ELF_PAGEALIGN(start);
39588 end = ELF_PAGEALIGN(end);
39589 if (end > start) {
39590@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39591 if (BAD_ADDR(addr))
39592 return addr;
39593 }
39594- current->mm->start_brk = current->mm->brk = end;
39595+ current->mm->start_brk = current->mm->brk = e;
39596 return 0;
39597 }
39598
39599@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39600 elf_addr_t __user *u_rand_bytes;
39601 const char *k_platform = ELF_PLATFORM;
39602 const char *k_base_platform = ELF_BASE_PLATFORM;
39603- unsigned char k_rand_bytes[16];
39604+ u32 k_rand_bytes[4];
39605 int items;
39606 elf_addr_t *elf_info;
39607 int ei_index = 0;
39608 const struct cred *cred = current_cred();
39609 struct vm_area_struct *vma;
39610+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39611
39612 /*
39613 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39614@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39615 * Generate 16 random bytes for userspace PRNG seeding.
39616 */
39617 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39618- u_rand_bytes = (elf_addr_t __user *)
39619- STACK_ALLOC(p, sizeof(k_rand_bytes));
39620+ srandom32(k_rand_bytes[0] ^ random32());
39621+ srandom32(k_rand_bytes[1] ^ random32());
39622+ srandom32(k_rand_bytes[2] ^ random32());
39623+ srandom32(k_rand_bytes[3] ^ random32());
39624+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39625+ u_rand_bytes = (elf_addr_t __user *) p;
39626 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39627 return -EFAULT;
39628
39629@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39630 return -EFAULT;
39631 current->mm->env_end = p;
39632
39633+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39634+
39635 /* Put the elf_info on the stack in the right place. */
39636 sp = (elf_addr_t __user *)envp + 1;
39637- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39638+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39639 return -EFAULT;
39640 return 0;
39641 }
39642@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39643 {
39644 struct elf_phdr *elf_phdata;
39645 struct elf_phdr *eppnt;
39646- unsigned long load_addr = 0;
39647+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39648 int load_addr_set = 0;
39649 unsigned long last_bss = 0, elf_bss = 0;
39650- unsigned long error = ~0UL;
39651+ unsigned long error = -EINVAL;
39652 unsigned long total_size;
39653 int retval, i, size;
39654
39655@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39656 goto out_close;
39657 }
39658
39659+#ifdef CONFIG_PAX_SEGMEXEC
39660+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39661+ pax_task_size = SEGMEXEC_TASK_SIZE;
39662+#endif
39663+
39664 eppnt = elf_phdata;
39665 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39666 if (eppnt->p_type == PT_LOAD) {
39667@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39668 k = load_addr + eppnt->p_vaddr;
39669 if (BAD_ADDR(k) ||
39670 eppnt->p_filesz > eppnt->p_memsz ||
39671- eppnt->p_memsz > TASK_SIZE ||
39672- TASK_SIZE - eppnt->p_memsz < k) {
39673+ eppnt->p_memsz > pax_task_size ||
39674+ pax_task_size - eppnt->p_memsz < k) {
39675 error = -ENOMEM;
39676 goto out_close;
39677 }
39678@@ -528,6 +552,348 @@ out:
39679 return error;
39680 }
39681
39682+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39683+{
39684+ unsigned long pax_flags = 0UL;
39685+
39686+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39687+
39688+#ifdef CONFIG_PAX_PAGEEXEC
39689+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39690+ pax_flags |= MF_PAX_PAGEEXEC;
39691+#endif
39692+
39693+#ifdef CONFIG_PAX_SEGMEXEC
39694+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39695+ pax_flags |= MF_PAX_SEGMEXEC;
39696+#endif
39697+
39698+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39699+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39700+ if ((__supported_pte_mask & _PAGE_NX))
39701+ pax_flags &= ~MF_PAX_SEGMEXEC;
39702+ else
39703+ pax_flags &= ~MF_PAX_PAGEEXEC;
39704+ }
39705+#endif
39706+
39707+#ifdef CONFIG_PAX_EMUTRAMP
39708+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39709+ pax_flags |= MF_PAX_EMUTRAMP;
39710+#endif
39711+
39712+#ifdef CONFIG_PAX_MPROTECT
39713+ if (elf_phdata->p_flags & PF_MPROTECT)
39714+ pax_flags |= MF_PAX_MPROTECT;
39715+#endif
39716+
39717+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39718+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39719+ pax_flags |= MF_PAX_RANDMMAP;
39720+#endif
39721+
39722+#endif
39723+
39724+ return pax_flags;
39725+}
39726+
39727+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39728+{
39729+ unsigned long pax_flags = 0UL;
39730+
39731+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39732+
39733+#ifdef CONFIG_PAX_PAGEEXEC
39734+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39735+ pax_flags |= MF_PAX_PAGEEXEC;
39736+#endif
39737+
39738+#ifdef CONFIG_PAX_SEGMEXEC
39739+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39740+ pax_flags |= MF_PAX_SEGMEXEC;
39741+#endif
39742+
39743+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39744+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39745+ if ((__supported_pte_mask & _PAGE_NX))
39746+ pax_flags &= ~MF_PAX_SEGMEXEC;
39747+ else
39748+ pax_flags &= ~MF_PAX_PAGEEXEC;
39749+ }
39750+#endif
39751+
39752+#ifdef CONFIG_PAX_EMUTRAMP
39753+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39754+ pax_flags |= MF_PAX_EMUTRAMP;
39755+#endif
39756+
39757+#ifdef CONFIG_PAX_MPROTECT
39758+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39759+ pax_flags |= MF_PAX_MPROTECT;
39760+#endif
39761+
39762+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39763+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39764+ pax_flags |= MF_PAX_RANDMMAP;
39765+#endif
39766+
39767+#endif
39768+
39769+ return pax_flags;
39770+}
39771+
39772+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39773+{
39774+ unsigned long pax_flags = 0UL;
39775+
39776+#ifdef CONFIG_PAX_EI_PAX
39777+
39778+#ifdef CONFIG_PAX_PAGEEXEC
39779+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39780+ pax_flags |= MF_PAX_PAGEEXEC;
39781+#endif
39782+
39783+#ifdef CONFIG_PAX_SEGMEXEC
39784+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39785+ pax_flags |= MF_PAX_SEGMEXEC;
39786+#endif
39787+
39788+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39789+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39790+ if ((__supported_pte_mask & _PAGE_NX))
39791+ pax_flags &= ~MF_PAX_SEGMEXEC;
39792+ else
39793+ pax_flags &= ~MF_PAX_PAGEEXEC;
39794+ }
39795+#endif
39796+
39797+#ifdef CONFIG_PAX_EMUTRAMP
39798+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39799+ pax_flags |= MF_PAX_EMUTRAMP;
39800+#endif
39801+
39802+#ifdef CONFIG_PAX_MPROTECT
39803+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39804+ pax_flags |= MF_PAX_MPROTECT;
39805+#endif
39806+
39807+#ifdef CONFIG_PAX_ASLR
39808+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39809+ pax_flags |= MF_PAX_RANDMMAP;
39810+#endif
39811+
39812+#else
39813+
39814+#ifdef CONFIG_PAX_PAGEEXEC
39815+ pax_flags |= MF_PAX_PAGEEXEC;
39816+#endif
39817+
39818+#ifdef CONFIG_PAX_MPROTECT
39819+ pax_flags |= MF_PAX_MPROTECT;
39820+#endif
39821+
39822+#ifdef CONFIG_PAX_RANDMMAP
39823+ pax_flags |= MF_PAX_RANDMMAP;
39824+#endif
39825+
39826+#ifdef CONFIG_PAX_SEGMEXEC
39827+ if (!(__supported_pte_mask & _PAGE_NX)) {
39828+ pax_flags &= ~MF_PAX_PAGEEXEC;
39829+ pax_flags |= MF_PAX_SEGMEXEC;
39830+ }
39831+#endif
39832+
39833+#endif
39834+
39835+ return pax_flags;
39836+}
39837+
39838+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39839+{
39840+
39841+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39842+ unsigned long i;
39843+
39844+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39845+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39846+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39847+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39848+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39849+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39850+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39851+ return ~0UL;
39852+
39853+#ifdef CONFIG_PAX_SOFTMODE
39854+ if (pax_softmode)
39855+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39856+ else
39857+#endif
39858+
39859+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39860+ break;
39861+ }
39862+#endif
39863+
39864+ return ~0UL;
39865+}
39866+
39867+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39868+{
39869+ unsigned long pax_flags = 0UL;
39870+
39871+#ifdef CONFIG_PAX_PAGEEXEC
39872+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39873+ pax_flags |= MF_PAX_PAGEEXEC;
39874+#endif
39875+
39876+#ifdef CONFIG_PAX_SEGMEXEC
39877+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39878+ pax_flags |= MF_PAX_SEGMEXEC;
39879+#endif
39880+
39881+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39882+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39883+ if ((__supported_pte_mask & _PAGE_NX))
39884+ pax_flags &= ~MF_PAX_SEGMEXEC;
39885+ else
39886+ pax_flags &= ~MF_PAX_PAGEEXEC;
39887+ }
39888+#endif
39889+
39890+#ifdef CONFIG_PAX_EMUTRAMP
39891+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
39892+ pax_flags |= MF_PAX_EMUTRAMP;
39893+#endif
39894+
39895+#ifdef CONFIG_PAX_MPROTECT
39896+ if (pax_flags_softmode & MF_PAX_MPROTECT)
39897+ pax_flags |= MF_PAX_MPROTECT;
39898+#endif
39899+
39900+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39901+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
39902+ pax_flags |= MF_PAX_RANDMMAP;
39903+#endif
39904+
39905+ return pax_flags;
39906+}
39907+
39908+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
39909+{
39910+ unsigned long pax_flags = 0UL;
39911+
39912+#ifdef CONFIG_PAX_PAGEEXEC
39913+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
39914+ pax_flags |= MF_PAX_PAGEEXEC;
39915+#endif
39916+
39917+#ifdef CONFIG_PAX_SEGMEXEC
39918+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
39919+ pax_flags |= MF_PAX_SEGMEXEC;
39920+#endif
39921+
39922+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39923+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39924+ if ((__supported_pte_mask & _PAGE_NX))
39925+ pax_flags &= ~MF_PAX_SEGMEXEC;
39926+ else
39927+ pax_flags &= ~MF_PAX_PAGEEXEC;
39928+ }
39929+#endif
39930+
39931+#ifdef CONFIG_PAX_EMUTRAMP
39932+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
39933+ pax_flags |= MF_PAX_EMUTRAMP;
39934+#endif
39935+
39936+#ifdef CONFIG_PAX_MPROTECT
39937+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
39938+ pax_flags |= MF_PAX_MPROTECT;
39939+#endif
39940+
39941+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39942+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
39943+ pax_flags |= MF_PAX_RANDMMAP;
39944+#endif
39945+
39946+ return pax_flags;
39947+}
39948+
39949+static unsigned long pax_parse_xattr_pax(struct file * const file)
39950+{
39951+
39952+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
39953+ ssize_t xattr_size, i;
39954+ unsigned char xattr_value[5];
39955+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
39956+
39957+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
39958+ if (xattr_size <= 0)
39959+ return ~0UL;
39960+
39961+ for (i = 0; i < xattr_size; i++)
39962+ switch (xattr_value[i]) {
39963+ default:
39964+ return ~0UL;
39965+
39966+#define parse_flag(option1, option2, flag) \
39967+ case option1: \
39968+ pax_flags_hardmode |= MF_PAX_##flag; \
39969+ break; \
39970+ case option2: \
39971+ pax_flags_softmode |= MF_PAX_##flag; \
39972+ break;
39973+
39974+ parse_flag('p', 'P', PAGEEXEC);
39975+ parse_flag('e', 'E', EMUTRAMP);
39976+ parse_flag('m', 'M', MPROTECT);
39977+ parse_flag('r', 'R', RANDMMAP);
39978+ parse_flag('s', 'S', SEGMEXEC);
39979+
39980+#undef parse_flag
39981+ }
39982+
39983+ if (pax_flags_hardmode & pax_flags_softmode)
39984+ return ~0UL;
39985+
39986+#ifdef CONFIG_PAX_SOFTMODE
39987+ if (pax_softmode)
39988+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
39989+ else
39990+#endif
39991+
39992+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
39993+#else
39994+ return ~0UL;
39995+#endif
39996+}
39997+
39998+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
39999+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40000+{
40001+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40002+
40003+ pax_flags = pax_parse_ei_pax(elf_ex);
40004+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40005+ xattr_pax_flags = pax_parse_xattr_pax(file);
40006+
40007+ if (pt_pax_flags == ~0UL)
40008+ pt_pax_flags = xattr_pax_flags;
40009+ else if (xattr_pax_flags == ~0UL)
40010+ xattr_pax_flags = pt_pax_flags;
40011+ if (pt_pax_flags != xattr_pax_flags)
40012+ return -EINVAL;
40013+ if (pt_pax_flags != ~0UL)
40014+ pax_flags = pt_pax_flags;
40015+
40016+ if (0 > pax_check_flags(&pax_flags))
40017+ return -EINVAL;
40018+
40019+ current->mm->pax_flags = pax_flags;
40020+ return 0;
40021+}
40022+#endif
40023+
40024 /*
40025 * These are the functions used to load ELF style executables and shared
40026 * libraries. There is no binary dependent code anywhere else.
40027@@ -544,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40028 {
40029 unsigned int random_variable = 0;
40030
40031+#ifdef CONFIG_PAX_RANDUSTACK
40032+ if (randomize_va_space)
40033+ return stack_top - current->mm->delta_stack;
40034+#endif
40035+
40036 if ((current->flags & PF_RANDOMIZE) &&
40037 !(current->personality & ADDR_NO_RANDOMIZE)) {
40038 random_variable = get_random_int() & STACK_RND_MASK;
40039@@ -562,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40040 unsigned long load_addr = 0, load_bias = 0;
40041 int load_addr_set = 0;
40042 char * elf_interpreter = NULL;
40043- unsigned long error;
40044+ unsigned long error = 0;
40045 struct elf_phdr *elf_ppnt, *elf_phdata;
40046 unsigned long elf_bss, elf_brk;
40047 int retval, i;
40048@@ -572,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40049 unsigned long start_code, end_code, start_data, end_data;
40050 unsigned long reloc_func_desc __maybe_unused = 0;
40051 int executable_stack = EXSTACK_DEFAULT;
40052- unsigned long def_flags = 0;
40053 struct {
40054 struct elfhdr elf_ex;
40055 struct elfhdr interp_elf_ex;
40056 } *loc;
40057+ unsigned long pax_task_size = TASK_SIZE;
40058
40059 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40060 if (!loc) {
40061@@ -713,11 +1084,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40062
40063 /* OK, This is the point of no return */
40064 current->flags &= ~PF_FORKNOEXEC;
40065- current->mm->def_flags = def_flags;
40066+
40067+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40068+ current->mm->pax_flags = 0UL;
40069+#endif
40070+
40071+#ifdef CONFIG_PAX_DLRESOLVE
40072+ current->mm->call_dl_resolve = 0UL;
40073+#endif
40074+
40075+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40076+ current->mm->call_syscall = 0UL;
40077+#endif
40078+
40079+#ifdef CONFIG_PAX_ASLR
40080+ current->mm->delta_mmap = 0UL;
40081+ current->mm->delta_stack = 0UL;
40082+#endif
40083+
40084+ current->mm->def_flags = 0;
40085+
40086+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40087+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40088+ send_sig(SIGKILL, current, 0);
40089+ goto out_free_dentry;
40090+ }
40091+#endif
40092+
40093+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40094+ pax_set_initial_flags(bprm);
40095+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40096+ if (pax_set_initial_flags_func)
40097+ (pax_set_initial_flags_func)(bprm);
40098+#endif
40099+
40100+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40101+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40102+ current->mm->context.user_cs_limit = PAGE_SIZE;
40103+ current->mm->def_flags |= VM_PAGEEXEC;
40104+ }
40105+#endif
40106+
40107+#ifdef CONFIG_PAX_SEGMEXEC
40108+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40109+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40110+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40111+ pax_task_size = SEGMEXEC_TASK_SIZE;
40112+ current->mm->def_flags |= VM_NOHUGEPAGE;
40113+ }
40114+#endif
40115+
40116+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40117+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40118+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40119+ put_cpu();
40120+ }
40121+#endif
40122
40123 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40124 may depend on the personality. */
40125 SET_PERSONALITY(loc->elf_ex);
40126+
40127+#ifdef CONFIG_PAX_ASLR
40128+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40129+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40130+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40131+ }
40132+#endif
40133+
40134+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40135+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40136+ executable_stack = EXSTACK_DISABLE_X;
40137+ current->personality &= ~READ_IMPLIES_EXEC;
40138+ } else
40139+#endif
40140+
40141 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40142 current->personality |= READ_IMPLIES_EXEC;
40143
40144@@ -808,6 +1249,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40145 #else
40146 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40147 #endif
40148+
40149+#ifdef CONFIG_PAX_RANDMMAP
40150+ /* PaX: randomize base address at the default exe base if requested */
40151+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40152+#ifdef CONFIG_SPARC64
40153+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40154+#else
40155+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40156+#endif
40157+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40158+ elf_flags |= MAP_FIXED;
40159+ }
40160+#endif
40161+
40162 }
40163
40164 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40165@@ -840,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40166 * allowed task size. Note that p_filesz must always be
40167 * <= p_memsz so it is only necessary to check p_memsz.
40168 */
40169- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40170- elf_ppnt->p_memsz > TASK_SIZE ||
40171- TASK_SIZE - elf_ppnt->p_memsz < k) {
40172+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40173+ elf_ppnt->p_memsz > pax_task_size ||
40174+ pax_task_size - elf_ppnt->p_memsz < k) {
40175 /* set_brk can never work. Avoid overflows. */
40176 send_sig(SIGKILL, current, 0);
40177 retval = -EINVAL;
40178@@ -870,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40179 start_data += load_bias;
40180 end_data += load_bias;
40181
40182+#ifdef CONFIG_PAX_RANDMMAP
40183+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40184+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40185+#endif
40186+
40187 /* Calling set_brk effectively mmaps the pages that we need
40188 * for the bss and break sections. We must do this before
40189 * mapping in the interpreter, to make sure it doesn't wind
40190@@ -881,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40191 goto out_free_dentry;
40192 }
40193 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40194- send_sig(SIGSEGV, current, 0);
40195- retval = -EFAULT; /* Nobody gets to see this, but.. */
40196- goto out_free_dentry;
40197+ /*
40198+ * This bss-zeroing can fail if the ELF
40199+ * file specifies odd protections. So
40200+ * we don't check the return value
40201+ */
40202 }
40203
40204 if (elf_interpreter) {
40205@@ -1098,7 +1560,7 @@ out:
40206 * Decide what to dump of a segment, part, all or none.
40207 */
40208 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40209- unsigned long mm_flags)
40210+ unsigned long mm_flags, long signr)
40211 {
40212 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40213
40214@@ -1132,7 +1594,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40215 if (vma->vm_file == NULL)
40216 return 0;
40217
40218- if (FILTER(MAPPED_PRIVATE))
40219+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40220 goto whole;
40221
40222 /*
40223@@ -1354,9 +1816,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40224 {
40225 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40226 int i = 0;
40227- do
40228+ do {
40229 i += 2;
40230- while (auxv[i - 2] != AT_NULL);
40231+ } while (auxv[i - 2] != AT_NULL);
40232 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40233 }
40234
40235@@ -1862,14 +2324,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40236 }
40237
40238 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40239- unsigned long mm_flags)
40240+ struct coredump_params *cprm)
40241 {
40242 struct vm_area_struct *vma;
40243 size_t size = 0;
40244
40245 for (vma = first_vma(current, gate_vma); vma != NULL;
40246 vma = next_vma(vma, gate_vma))
40247- size += vma_dump_size(vma, mm_flags);
40248+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40249 return size;
40250 }
40251
40252@@ -1963,7 +2425,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40253
40254 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40255
40256- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40257+ offset += elf_core_vma_data_size(gate_vma, cprm);
40258 offset += elf_core_extra_data_size();
40259 e_shoff = offset;
40260
40261@@ -1977,10 +2439,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40262 offset = dataoff;
40263
40264 size += sizeof(*elf);
40265+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40266 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40267 goto end_coredump;
40268
40269 size += sizeof(*phdr4note);
40270+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40271 if (size > cprm->limit
40272 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40273 goto end_coredump;
40274@@ -1994,7 +2458,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40275 phdr.p_offset = offset;
40276 phdr.p_vaddr = vma->vm_start;
40277 phdr.p_paddr = 0;
40278- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40279+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40280 phdr.p_memsz = vma->vm_end - vma->vm_start;
40281 offset += phdr.p_filesz;
40282 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40283@@ -2005,6 +2469,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40284 phdr.p_align = ELF_EXEC_PAGESIZE;
40285
40286 size += sizeof(phdr);
40287+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40288 if (size > cprm->limit
40289 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40290 goto end_coredump;
40291@@ -2029,7 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40292 unsigned long addr;
40293 unsigned long end;
40294
40295- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40296+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40297
40298 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40299 struct page *page;
40300@@ -2038,6 +2503,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40301 page = get_dump_page(addr);
40302 if (page) {
40303 void *kaddr = kmap(page);
40304+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40305 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40306 !dump_write(cprm->file, kaddr,
40307 PAGE_SIZE);
40308@@ -2055,6 +2521,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40309
40310 if (e_phnum == PN_XNUM) {
40311 size += sizeof(*shdr4extnum);
40312+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40313 if (size > cprm->limit
40314 || !dump_write(cprm->file, shdr4extnum,
40315 sizeof(*shdr4extnum)))
40316@@ -2075,6 +2542,97 @@ out:
40317
40318 #endif /* CONFIG_ELF_CORE */
40319
40320+#ifdef CONFIG_PAX_MPROTECT
40321+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40322+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40323+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40324+ *
40325+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40326+ * basis because we want to allow the common case and not the special ones.
40327+ */
40328+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40329+{
40330+ struct elfhdr elf_h;
40331+ struct elf_phdr elf_p;
40332+ unsigned long i;
40333+ unsigned long oldflags;
40334+ bool is_textrel_rw, is_textrel_rx, is_relro;
40335+
40336+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40337+ return;
40338+
40339+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40340+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40341+
40342+#ifdef CONFIG_PAX_ELFRELOCS
40343+ /* possible TEXTREL */
40344+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40345+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40346+#else
40347+ is_textrel_rw = false;
40348+ is_textrel_rx = false;
40349+#endif
40350+
40351+ /* possible RELRO */
40352+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40353+
40354+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40355+ return;
40356+
40357+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40358+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40359+
40360+#ifdef CONFIG_PAX_ETEXECRELOCS
40361+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40362+#else
40363+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40364+#endif
40365+
40366+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40367+ !elf_check_arch(&elf_h) ||
40368+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40369+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40370+ return;
40371+
40372+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40373+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40374+ return;
40375+ switch (elf_p.p_type) {
40376+ case PT_DYNAMIC:
40377+ if (!is_textrel_rw && !is_textrel_rx)
40378+ continue;
40379+ i = 0UL;
40380+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40381+ elf_dyn dyn;
40382+
40383+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40384+ return;
40385+ if (dyn.d_tag == DT_NULL)
40386+ return;
40387+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40388+ gr_log_textrel(vma);
40389+ if (is_textrel_rw)
40390+ vma->vm_flags |= VM_MAYWRITE;
40391+ else
40392+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40393+ vma->vm_flags &= ~VM_MAYWRITE;
40394+ return;
40395+ }
40396+ i++;
40397+ }
40398+ return;
40399+
40400+ case PT_GNU_RELRO:
40401+ if (!is_relro)
40402+ continue;
40403+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40404+ vma->vm_flags &= ~VM_MAYWRITE;
40405+ return;
40406+ }
40407+ }
40408+}
40409+#endif
40410+
40411 static int __init init_elf_binfmt(void)
40412 {
40413 return register_binfmt(&elf_format);
40414diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40415index 1bffbe0..c8c283e 100644
40416--- a/fs/binfmt_flat.c
40417+++ b/fs/binfmt_flat.c
40418@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40419 realdatastart = (unsigned long) -ENOMEM;
40420 printk("Unable to allocate RAM for process data, errno %d\n",
40421 (int)-realdatastart);
40422+ down_write(&current->mm->mmap_sem);
40423 do_munmap(current->mm, textpos, text_len);
40424+ up_write(&current->mm->mmap_sem);
40425 ret = realdatastart;
40426 goto err;
40427 }
40428@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40429 }
40430 if (IS_ERR_VALUE(result)) {
40431 printk("Unable to read data+bss, errno %d\n", (int)-result);
40432+ down_write(&current->mm->mmap_sem);
40433 do_munmap(current->mm, textpos, text_len);
40434 do_munmap(current->mm, realdatastart, len);
40435+ up_write(&current->mm->mmap_sem);
40436 ret = result;
40437 goto err;
40438 }
40439@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40440 }
40441 if (IS_ERR_VALUE(result)) {
40442 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40443+ down_write(&current->mm->mmap_sem);
40444 do_munmap(current->mm, textpos, text_len + data_len + extra +
40445 MAX_SHARED_LIBS * sizeof(unsigned long));
40446+ up_write(&current->mm->mmap_sem);
40447 ret = result;
40448 goto err;
40449 }
40450diff --git a/fs/bio.c b/fs/bio.c
40451index b1fe82c..84da0a9 100644
40452--- a/fs/bio.c
40453+++ b/fs/bio.c
40454@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40455 const int read = bio_data_dir(bio) == READ;
40456 struct bio_map_data *bmd = bio->bi_private;
40457 int i;
40458- char *p = bmd->sgvecs[0].iov_base;
40459+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40460
40461 __bio_for_each_segment(bvec, bio, i, 0) {
40462 char *addr = page_address(bvec->bv_page);
40463diff --git a/fs/block_dev.c b/fs/block_dev.c
40464index b07f1da..9efcb92 100644
40465--- a/fs/block_dev.c
40466+++ b/fs/block_dev.c
40467@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40468 else if (bdev->bd_contains == bdev)
40469 return true; /* is a whole device which isn't held */
40470
40471- else if (whole->bd_holder == bd_may_claim)
40472+ else if (whole->bd_holder == (void *)bd_may_claim)
40473 return true; /* is a partition of a device that is being partitioned */
40474 else if (whole->bd_holder != NULL)
40475 return false; /* is a partition of a held device */
40476diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40477index dede441..f2a2507 100644
40478--- a/fs/btrfs/ctree.c
40479+++ b/fs/btrfs/ctree.c
40480@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40481 free_extent_buffer(buf);
40482 add_root_to_dirty_list(root);
40483 } else {
40484- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40485- parent_start = parent->start;
40486- else
40487+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40488+ if (parent)
40489+ parent_start = parent->start;
40490+ else
40491+ parent_start = 0;
40492+ } else
40493 parent_start = 0;
40494
40495 WARN_ON(trans->transid != btrfs_header_generation(parent));
40496diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40497index fd1a06d..6e9033d 100644
40498--- a/fs/btrfs/inode.c
40499+++ b/fs/btrfs/inode.c
40500@@ -6895,7 +6895,7 @@ fail:
40501 return -ENOMEM;
40502 }
40503
40504-static int btrfs_getattr(struct vfsmount *mnt,
40505+int btrfs_getattr(struct vfsmount *mnt,
40506 struct dentry *dentry, struct kstat *stat)
40507 {
40508 struct inode *inode = dentry->d_inode;
40509@@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40510 return 0;
40511 }
40512
40513+EXPORT_SYMBOL(btrfs_getattr);
40514+
40515+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40516+{
40517+ return BTRFS_I(inode)->root->anon_dev;
40518+}
40519+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40520+
40521 /*
40522 * If a file is moved, it will inherit the cow and compression flags of the new
40523 * directory.
40524diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40525index c04f02c..f5c9e2e 100644
40526--- a/fs/btrfs/ioctl.c
40527+++ b/fs/btrfs/ioctl.c
40528@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40529 for (i = 0; i < num_types; i++) {
40530 struct btrfs_space_info *tmp;
40531
40532+ /* Don't copy in more than we allocated */
40533 if (!slot_count)
40534 break;
40535
40536+ slot_count--;
40537+
40538 info = NULL;
40539 rcu_read_lock();
40540 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40541@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40542 memcpy(dest, &space, sizeof(space));
40543 dest++;
40544 space_args.total_spaces++;
40545- slot_count--;
40546 }
40547- if (!slot_count)
40548- break;
40549 }
40550 up_read(&info->groups_sem);
40551 }
40552
40553- user_dest = (struct btrfs_ioctl_space_info *)
40554+ user_dest = (struct btrfs_ioctl_space_info __user *)
40555 (arg + sizeof(struct btrfs_ioctl_space_args));
40556
40557 if (copy_to_user(user_dest, dest_orig, alloc_size))
40558diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40559index cfb5543..1ae7347 100644
40560--- a/fs/btrfs/relocation.c
40561+++ b/fs/btrfs/relocation.c
40562@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40563 }
40564 spin_unlock(&rc->reloc_root_tree.lock);
40565
40566- BUG_ON((struct btrfs_root *)node->data != root);
40567+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40568
40569 if (!del) {
40570 spin_lock(&rc->reloc_root_tree.lock);
40571diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40572index 622f469..e8d2d55 100644
40573--- a/fs/cachefiles/bind.c
40574+++ b/fs/cachefiles/bind.c
40575@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40576 args);
40577
40578 /* start by checking things over */
40579- ASSERT(cache->fstop_percent >= 0 &&
40580- cache->fstop_percent < cache->fcull_percent &&
40581+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40582 cache->fcull_percent < cache->frun_percent &&
40583 cache->frun_percent < 100);
40584
40585- ASSERT(cache->bstop_percent >= 0 &&
40586- cache->bstop_percent < cache->bcull_percent &&
40587+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40588 cache->bcull_percent < cache->brun_percent &&
40589 cache->brun_percent < 100);
40590
40591diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40592index 0a1467b..6a53245 100644
40593--- a/fs/cachefiles/daemon.c
40594+++ b/fs/cachefiles/daemon.c
40595@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40596 if (n > buflen)
40597 return -EMSGSIZE;
40598
40599- if (copy_to_user(_buffer, buffer, n) != 0)
40600+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40601 return -EFAULT;
40602
40603 return n;
40604@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40605 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40606 return -EIO;
40607
40608- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40609+ if (datalen > PAGE_SIZE - 1)
40610 return -EOPNOTSUPP;
40611
40612 /* drag the command string into the kernel so we can parse it */
40613@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40614 if (args[0] != '%' || args[1] != '\0')
40615 return -EINVAL;
40616
40617- if (fstop < 0 || fstop >= cache->fcull_percent)
40618+ if (fstop >= cache->fcull_percent)
40619 return cachefiles_daemon_range_error(cache, args);
40620
40621 cache->fstop_percent = fstop;
40622@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40623 if (args[0] != '%' || args[1] != '\0')
40624 return -EINVAL;
40625
40626- if (bstop < 0 || bstop >= cache->bcull_percent)
40627+ if (bstop >= cache->bcull_percent)
40628 return cachefiles_daemon_range_error(cache, args);
40629
40630 cache->bstop_percent = bstop;
40631diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40632index bd6bc1b..b627b53 100644
40633--- a/fs/cachefiles/internal.h
40634+++ b/fs/cachefiles/internal.h
40635@@ -57,7 +57,7 @@ struct cachefiles_cache {
40636 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40637 struct rb_root active_nodes; /* active nodes (can't be culled) */
40638 rwlock_t active_lock; /* lock for active_nodes */
40639- atomic_t gravecounter; /* graveyard uniquifier */
40640+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40641 unsigned frun_percent; /* when to stop culling (% files) */
40642 unsigned fcull_percent; /* when to start culling (% files) */
40643 unsigned fstop_percent; /* when to stop allocating (% files) */
40644@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40645 * proc.c
40646 */
40647 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40648-extern atomic_t cachefiles_lookup_histogram[HZ];
40649-extern atomic_t cachefiles_mkdir_histogram[HZ];
40650-extern atomic_t cachefiles_create_histogram[HZ];
40651+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40652+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40653+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40654
40655 extern int __init cachefiles_proc_init(void);
40656 extern void cachefiles_proc_cleanup(void);
40657 static inline
40658-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40659+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40660 {
40661 unsigned long jif = jiffies - start_jif;
40662 if (jif >= HZ)
40663 jif = HZ - 1;
40664- atomic_inc(&histogram[jif]);
40665+ atomic_inc_unchecked(&histogram[jif]);
40666 }
40667
40668 #else
40669diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40670index a0358c2..d6137f2 100644
40671--- a/fs/cachefiles/namei.c
40672+++ b/fs/cachefiles/namei.c
40673@@ -318,7 +318,7 @@ try_again:
40674 /* first step is to make up a grave dentry in the graveyard */
40675 sprintf(nbuffer, "%08x%08x",
40676 (uint32_t) get_seconds(),
40677- (uint32_t) atomic_inc_return(&cache->gravecounter));
40678+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40679
40680 /* do the multiway lock magic */
40681 trap = lock_rename(cache->graveyard, dir);
40682diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40683index eccd339..4c1d995 100644
40684--- a/fs/cachefiles/proc.c
40685+++ b/fs/cachefiles/proc.c
40686@@ -14,9 +14,9 @@
40687 #include <linux/seq_file.h>
40688 #include "internal.h"
40689
40690-atomic_t cachefiles_lookup_histogram[HZ];
40691-atomic_t cachefiles_mkdir_histogram[HZ];
40692-atomic_t cachefiles_create_histogram[HZ];
40693+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40694+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40695+atomic_unchecked_t cachefiles_create_histogram[HZ];
40696
40697 /*
40698 * display the latency histogram
40699@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40700 return 0;
40701 default:
40702 index = (unsigned long) v - 3;
40703- x = atomic_read(&cachefiles_lookup_histogram[index]);
40704- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40705- z = atomic_read(&cachefiles_create_histogram[index]);
40706+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40707+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40708+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40709 if (x == 0 && y == 0 && z == 0)
40710 return 0;
40711
40712diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40713index 0e3c092..818480e 100644
40714--- a/fs/cachefiles/rdwr.c
40715+++ b/fs/cachefiles/rdwr.c
40716@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40717 old_fs = get_fs();
40718 set_fs(KERNEL_DS);
40719 ret = file->f_op->write(
40720- file, (const void __user *) data, len, &pos);
40721+ file, (const void __force_user *) data, len, &pos);
40722 set_fs(old_fs);
40723 kunmap(page);
40724 if (ret != len)
40725diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40726index 9895400..fa40a7d 100644
40727--- a/fs/ceph/dir.c
40728+++ b/fs/ceph/dir.c
40729@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40730 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40731 struct ceph_mds_client *mdsc = fsc->mdsc;
40732 unsigned frag = fpos_frag(filp->f_pos);
40733- int off = fpos_off(filp->f_pos);
40734+ unsigned int off = fpos_off(filp->f_pos);
40735 int err;
40736 u32 ftype;
40737 struct ceph_mds_reply_info_parsed *rinfo;
40738diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40739index 84e8c07..6170d31 100644
40740--- a/fs/cifs/cifs_debug.c
40741+++ b/fs/cifs/cifs_debug.c
40742@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40743
40744 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40745 #ifdef CONFIG_CIFS_STATS2
40746- atomic_set(&totBufAllocCount, 0);
40747- atomic_set(&totSmBufAllocCount, 0);
40748+ atomic_set_unchecked(&totBufAllocCount, 0);
40749+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40750 #endif /* CONFIG_CIFS_STATS2 */
40751 spin_lock(&cifs_tcp_ses_lock);
40752 list_for_each(tmp1, &cifs_tcp_ses_list) {
40753@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40754 tcon = list_entry(tmp3,
40755 struct cifs_tcon,
40756 tcon_list);
40757- atomic_set(&tcon->num_smbs_sent, 0);
40758- atomic_set(&tcon->num_writes, 0);
40759- atomic_set(&tcon->num_reads, 0);
40760- atomic_set(&tcon->num_oplock_brks, 0);
40761- atomic_set(&tcon->num_opens, 0);
40762- atomic_set(&tcon->num_posixopens, 0);
40763- atomic_set(&tcon->num_posixmkdirs, 0);
40764- atomic_set(&tcon->num_closes, 0);
40765- atomic_set(&tcon->num_deletes, 0);
40766- atomic_set(&tcon->num_mkdirs, 0);
40767- atomic_set(&tcon->num_rmdirs, 0);
40768- atomic_set(&tcon->num_renames, 0);
40769- atomic_set(&tcon->num_t2renames, 0);
40770- atomic_set(&tcon->num_ffirst, 0);
40771- atomic_set(&tcon->num_fnext, 0);
40772- atomic_set(&tcon->num_fclose, 0);
40773- atomic_set(&tcon->num_hardlinks, 0);
40774- atomic_set(&tcon->num_symlinks, 0);
40775- atomic_set(&tcon->num_locks, 0);
40776+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40777+ atomic_set_unchecked(&tcon->num_writes, 0);
40778+ atomic_set_unchecked(&tcon->num_reads, 0);
40779+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40780+ atomic_set_unchecked(&tcon->num_opens, 0);
40781+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40782+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40783+ atomic_set_unchecked(&tcon->num_closes, 0);
40784+ atomic_set_unchecked(&tcon->num_deletes, 0);
40785+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40786+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40787+ atomic_set_unchecked(&tcon->num_renames, 0);
40788+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40789+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40790+ atomic_set_unchecked(&tcon->num_fnext, 0);
40791+ atomic_set_unchecked(&tcon->num_fclose, 0);
40792+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40793+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40794+ atomic_set_unchecked(&tcon->num_locks, 0);
40795 }
40796 }
40797 }
40798@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40799 smBufAllocCount.counter, cifs_min_small);
40800 #ifdef CONFIG_CIFS_STATS2
40801 seq_printf(m, "Total Large %d Small %d Allocations\n",
40802- atomic_read(&totBufAllocCount),
40803- atomic_read(&totSmBufAllocCount));
40804+ atomic_read_unchecked(&totBufAllocCount),
40805+ atomic_read_unchecked(&totSmBufAllocCount));
40806 #endif /* CONFIG_CIFS_STATS2 */
40807
40808 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40809@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40810 if (tcon->need_reconnect)
40811 seq_puts(m, "\tDISCONNECTED ");
40812 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40813- atomic_read(&tcon->num_smbs_sent),
40814- atomic_read(&tcon->num_oplock_brks));
40815+ atomic_read_unchecked(&tcon->num_smbs_sent),
40816+ atomic_read_unchecked(&tcon->num_oplock_brks));
40817 seq_printf(m, "\nReads: %d Bytes: %lld",
40818- atomic_read(&tcon->num_reads),
40819+ atomic_read_unchecked(&tcon->num_reads),
40820 (long long)(tcon->bytes_read));
40821 seq_printf(m, "\nWrites: %d Bytes: %lld",
40822- atomic_read(&tcon->num_writes),
40823+ atomic_read_unchecked(&tcon->num_writes),
40824 (long long)(tcon->bytes_written));
40825 seq_printf(m, "\nFlushes: %d",
40826- atomic_read(&tcon->num_flushes));
40827+ atomic_read_unchecked(&tcon->num_flushes));
40828 seq_printf(m, "\nLocks: %d HardLinks: %d "
40829 "Symlinks: %d",
40830- atomic_read(&tcon->num_locks),
40831- atomic_read(&tcon->num_hardlinks),
40832- atomic_read(&tcon->num_symlinks));
40833+ atomic_read_unchecked(&tcon->num_locks),
40834+ atomic_read_unchecked(&tcon->num_hardlinks),
40835+ atomic_read_unchecked(&tcon->num_symlinks));
40836 seq_printf(m, "\nOpens: %d Closes: %d "
40837 "Deletes: %d",
40838- atomic_read(&tcon->num_opens),
40839- atomic_read(&tcon->num_closes),
40840- atomic_read(&tcon->num_deletes));
40841+ atomic_read_unchecked(&tcon->num_opens),
40842+ atomic_read_unchecked(&tcon->num_closes),
40843+ atomic_read_unchecked(&tcon->num_deletes));
40844 seq_printf(m, "\nPosix Opens: %d "
40845 "Posix Mkdirs: %d",
40846- atomic_read(&tcon->num_posixopens),
40847- atomic_read(&tcon->num_posixmkdirs));
40848+ atomic_read_unchecked(&tcon->num_posixopens),
40849+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40850 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40851- atomic_read(&tcon->num_mkdirs),
40852- atomic_read(&tcon->num_rmdirs));
40853+ atomic_read_unchecked(&tcon->num_mkdirs),
40854+ atomic_read_unchecked(&tcon->num_rmdirs));
40855 seq_printf(m, "\nRenames: %d T2 Renames %d",
40856- atomic_read(&tcon->num_renames),
40857- atomic_read(&tcon->num_t2renames));
40858+ atomic_read_unchecked(&tcon->num_renames),
40859+ atomic_read_unchecked(&tcon->num_t2renames));
40860 seq_printf(m, "\nFindFirst: %d FNext %d "
40861 "FClose %d",
40862- atomic_read(&tcon->num_ffirst),
40863- atomic_read(&tcon->num_fnext),
40864- atomic_read(&tcon->num_fclose));
40865+ atomic_read_unchecked(&tcon->num_ffirst),
40866+ atomic_read_unchecked(&tcon->num_fnext),
40867+ atomic_read_unchecked(&tcon->num_fclose));
40868 }
40869 }
40870 }
40871diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40872index 8f1fe32..38f9e27 100644
40873--- a/fs/cifs/cifsfs.c
40874+++ b/fs/cifs/cifsfs.c
40875@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40876 cifs_req_cachep = kmem_cache_create("cifs_request",
40877 CIFSMaxBufSize +
40878 MAX_CIFS_HDR_SIZE, 0,
40879- SLAB_HWCACHE_ALIGN, NULL);
40880+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40881 if (cifs_req_cachep == NULL)
40882 return -ENOMEM;
40883
40884@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
40885 efficient to alloc 1 per page off the slab compared to 17K (5page)
40886 alloc of large cifs buffers even when page debugging is on */
40887 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40888- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40889+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40890 NULL);
40891 if (cifs_sm_req_cachep == NULL) {
40892 mempool_destroy(cifs_req_poolp);
40893@@ -1101,8 +1101,8 @@ init_cifs(void)
40894 atomic_set(&bufAllocCount, 0);
40895 atomic_set(&smBufAllocCount, 0);
40896 #ifdef CONFIG_CIFS_STATS2
40897- atomic_set(&totBufAllocCount, 0);
40898- atomic_set(&totSmBufAllocCount, 0);
40899+ atomic_set_unchecked(&totBufAllocCount, 0);
40900+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40901 #endif /* CONFIG_CIFS_STATS2 */
40902
40903 atomic_set(&midCount, 0);
40904diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
40905index 8238aa1..0347196 100644
40906--- a/fs/cifs/cifsglob.h
40907+++ b/fs/cifs/cifsglob.h
40908@@ -392,28 +392,28 @@ struct cifs_tcon {
40909 __u16 Flags; /* optional support bits */
40910 enum statusEnum tidStatus;
40911 #ifdef CONFIG_CIFS_STATS
40912- atomic_t num_smbs_sent;
40913- atomic_t num_writes;
40914- atomic_t num_reads;
40915- atomic_t num_flushes;
40916- atomic_t num_oplock_brks;
40917- atomic_t num_opens;
40918- atomic_t num_closes;
40919- atomic_t num_deletes;
40920- atomic_t num_mkdirs;
40921- atomic_t num_posixopens;
40922- atomic_t num_posixmkdirs;
40923- atomic_t num_rmdirs;
40924- atomic_t num_renames;
40925- atomic_t num_t2renames;
40926- atomic_t num_ffirst;
40927- atomic_t num_fnext;
40928- atomic_t num_fclose;
40929- atomic_t num_hardlinks;
40930- atomic_t num_symlinks;
40931- atomic_t num_locks;
40932- atomic_t num_acl_get;
40933- atomic_t num_acl_set;
40934+ atomic_unchecked_t num_smbs_sent;
40935+ atomic_unchecked_t num_writes;
40936+ atomic_unchecked_t num_reads;
40937+ atomic_unchecked_t num_flushes;
40938+ atomic_unchecked_t num_oplock_brks;
40939+ atomic_unchecked_t num_opens;
40940+ atomic_unchecked_t num_closes;
40941+ atomic_unchecked_t num_deletes;
40942+ atomic_unchecked_t num_mkdirs;
40943+ atomic_unchecked_t num_posixopens;
40944+ atomic_unchecked_t num_posixmkdirs;
40945+ atomic_unchecked_t num_rmdirs;
40946+ atomic_unchecked_t num_renames;
40947+ atomic_unchecked_t num_t2renames;
40948+ atomic_unchecked_t num_ffirst;
40949+ atomic_unchecked_t num_fnext;
40950+ atomic_unchecked_t num_fclose;
40951+ atomic_unchecked_t num_hardlinks;
40952+ atomic_unchecked_t num_symlinks;
40953+ atomic_unchecked_t num_locks;
40954+ atomic_unchecked_t num_acl_get;
40955+ atomic_unchecked_t num_acl_set;
40956 #ifdef CONFIG_CIFS_STATS2
40957 unsigned long long time_writes;
40958 unsigned long long time_reads;
40959@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
40960 }
40961
40962 #ifdef CONFIG_CIFS_STATS
40963-#define cifs_stats_inc atomic_inc
40964+#define cifs_stats_inc atomic_inc_unchecked
40965
40966 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40967 unsigned int bytes)
40968@@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
40969 /* Various Debug counters */
40970 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40971 #ifdef CONFIG_CIFS_STATS2
40972-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40973-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40974+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40975+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40976 #endif
40977 GLOBAL_EXTERN atomic_t smBufAllocCount;
40978 GLOBAL_EXTERN atomic_t midCount;
40979diff --git a/fs/cifs/link.c b/fs/cifs/link.c
40980index 6b0e064..94e6c3c 100644
40981--- a/fs/cifs/link.c
40982+++ b/fs/cifs/link.c
40983@@ -600,7 +600,7 @@ symlink_exit:
40984
40985 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40986 {
40987- char *p = nd_get_link(nd);
40988+ const char *p = nd_get_link(nd);
40989 if (!IS_ERR(p))
40990 kfree(p);
40991 }
40992diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
40993index 703ef5c..2a44ed5 100644
40994--- a/fs/cifs/misc.c
40995+++ b/fs/cifs/misc.c
40996@@ -156,7 +156,7 @@ cifs_buf_get(void)
40997 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40998 atomic_inc(&bufAllocCount);
40999 #ifdef CONFIG_CIFS_STATS2
41000- atomic_inc(&totBufAllocCount);
41001+ atomic_inc_unchecked(&totBufAllocCount);
41002 #endif /* CONFIG_CIFS_STATS2 */
41003 }
41004
41005@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41006 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41007 atomic_inc(&smBufAllocCount);
41008 #ifdef CONFIG_CIFS_STATS2
41009- atomic_inc(&totSmBufAllocCount);
41010+ atomic_inc_unchecked(&totSmBufAllocCount);
41011 #endif /* CONFIG_CIFS_STATS2 */
41012
41013 }
41014diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41015index 6901578..d402eb5 100644
41016--- a/fs/coda/cache.c
41017+++ b/fs/coda/cache.c
41018@@ -24,7 +24,7 @@
41019 #include "coda_linux.h"
41020 #include "coda_cache.h"
41021
41022-static atomic_t permission_epoch = ATOMIC_INIT(0);
41023+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41024
41025 /* replace or extend an acl cache hit */
41026 void coda_cache_enter(struct inode *inode, int mask)
41027@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41028 struct coda_inode_info *cii = ITOC(inode);
41029
41030 spin_lock(&cii->c_lock);
41031- cii->c_cached_epoch = atomic_read(&permission_epoch);
41032+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41033 if (cii->c_uid != current_fsuid()) {
41034 cii->c_uid = current_fsuid();
41035 cii->c_cached_perm = mask;
41036@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41037 {
41038 struct coda_inode_info *cii = ITOC(inode);
41039 spin_lock(&cii->c_lock);
41040- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41041+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41042 spin_unlock(&cii->c_lock);
41043 }
41044
41045 /* remove all acl caches */
41046 void coda_cache_clear_all(struct super_block *sb)
41047 {
41048- atomic_inc(&permission_epoch);
41049+ atomic_inc_unchecked(&permission_epoch);
41050 }
41051
41052
41053@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41054 spin_lock(&cii->c_lock);
41055 hit = (mask & cii->c_cached_perm) == mask &&
41056 cii->c_uid == current_fsuid() &&
41057- cii->c_cached_epoch == atomic_read(&permission_epoch);
41058+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41059 spin_unlock(&cii->c_lock);
41060
41061 return hit;
41062diff --git a/fs/compat.c b/fs/compat.c
41063index c987875..08771ca 100644
41064--- a/fs/compat.c
41065+++ b/fs/compat.c
41066@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41067 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41068 {
41069 compat_ino_t ino = stat->ino;
41070- typeof(ubuf->st_uid) uid = 0;
41071- typeof(ubuf->st_gid) gid = 0;
41072+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41073+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41074 int err;
41075
41076 SET_UID(uid, stat->uid);
41077@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41078
41079 set_fs(KERNEL_DS);
41080 /* The __user pointer cast is valid because of the set_fs() */
41081- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41082+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41083 set_fs(oldfs);
41084 /* truncating is ok because it's a user address */
41085 if (!ret)
41086@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41087 goto out;
41088
41089 ret = -EINVAL;
41090- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41091+ if (nr_segs > UIO_MAXIOV)
41092 goto out;
41093 if (nr_segs > fast_segs) {
41094 ret = -ENOMEM;
41095@@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41096
41097 struct compat_readdir_callback {
41098 struct compat_old_linux_dirent __user *dirent;
41099+ struct file * file;
41100 int result;
41101 };
41102
41103@@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41104 buf->result = -EOVERFLOW;
41105 return -EOVERFLOW;
41106 }
41107+
41108+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41109+ return 0;
41110+
41111 buf->result++;
41112 dirent = buf->dirent;
41113 if (!access_ok(VERIFY_WRITE, dirent,
41114@@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41115
41116 buf.result = 0;
41117 buf.dirent = dirent;
41118+ buf.file = file;
41119
41120 error = vfs_readdir(file, compat_fillonedir, &buf);
41121 if (buf.result)
41122@@ -914,6 +920,7 @@ struct compat_linux_dirent {
41123 struct compat_getdents_callback {
41124 struct compat_linux_dirent __user *current_dir;
41125 struct compat_linux_dirent __user *previous;
41126+ struct file * file;
41127 int count;
41128 int error;
41129 };
41130@@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41131 buf->error = -EOVERFLOW;
41132 return -EOVERFLOW;
41133 }
41134+
41135+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41136+ return 0;
41137+
41138 dirent = buf->previous;
41139 if (dirent) {
41140 if (__put_user(offset, &dirent->d_off))
41141@@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41142 buf.previous = NULL;
41143 buf.count = count;
41144 buf.error = 0;
41145+ buf.file = file;
41146
41147 error = vfs_readdir(file, compat_filldir, &buf);
41148 if (error >= 0)
41149@@ -1003,6 +1015,7 @@ out:
41150 struct compat_getdents_callback64 {
41151 struct linux_dirent64 __user *current_dir;
41152 struct linux_dirent64 __user *previous;
41153+ struct file * file;
41154 int count;
41155 int error;
41156 };
41157@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41158 buf->error = -EINVAL; /* only used if we fail.. */
41159 if (reclen > buf->count)
41160 return -EINVAL;
41161+
41162+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41163+ return 0;
41164+
41165 dirent = buf->previous;
41166
41167 if (dirent) {
41168@@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41169 buf.previous = NULL;
41170 buf.count = count;
41171 buf.error = 0;
41172+ buf.file = file;
41173
41174 error = vfs_readdir(file, compat_filldir64, &buf);
41175 if (error >= 0)
41176 error = buf.error;
41177 lastdirent = buf.previous;
41178 if (lastdirent) {
41179- typeof(lastdirent->d_off) d_off = file->f_pos;
41180+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41181 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41182 error = -EFAULT;
41183 else
41184diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41185index 112e45a..b59845b 100644
41186--- a/fs/compat_binfmt_elf.c
41187+++ b/fs/compat_binfmt_elf.c
41188@@ -30,11 +30,13 @@
41189 #undef elf_phdr
41190 #undef elf_shdr
41191 #undef elf_note
41192+#undef elf_dyn
41193 #undef elf_addr_t
41194 #define elfhdr elf32_hdr
41195 #define elf_phdr elf32_phdr
41196 #define elf_shdr elf32_shdr
41197 #define elf_note elf32_note
41198+#define elf_dyn Elf32_Dyn
41199 #define elf_addr_t Elf32_Addr
41200
41201 /*
41202diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41203index 51352de..93292ff 100644
41204--- a/fs/compat_ioctl.c
41205+++ b/fs/compat_ioctl.c
41206@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41207
41208 err = get_user(palp, &up->palette);
41209 err |= get_user(length, &up->length);
41210+ if (err)
41211+ return -EFAULT;
41212
41213 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41214 err = put_user(compat_ptr(palp), &up_native->palette);
41215@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41216 return -EFAULT;
41217 if (__get_user(udata, &ss32->iomem_base))
41218 return -EFAULT;
41219- ss.iomem_base = compat_ptr(udata);
41220+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41221 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41222 __get_user(ss.port_high, &ss32->port_high))
41223 return -EFAULT;
41224@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41225 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41226 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41227 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41228- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41229+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41230 return -EFAULT;
41231
41232 return ioctl_preallocate(file, p);
41233@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41234 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41235 {
41236 unsigned int a, b;
41237- a = *(unsigned int *)p;
41238- b = *(unsigned int *)q;
41239+ a = *(const unsigned int *)p;
41240+ b = *(const unsigned int *)q;
41241 if (a > b)
41242 return 1;
41243 if (a < b)
41244diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41245index 9a37a9b..35792b6 100644
41246--- a/fs/configfs/dir.c
41247+++ b/fs/configfs/dir.c
41248@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41249 }
41250 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41251 struct configfs_dirent *next;
41252- const char * name;
41253+ const unsigned char * name;
41254+ char d_name[sizeof(next->s_dentry->d_iname)];
41255 int len;
41256 struct inode *inode = NULL;
41257
41258@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41259 continue;
41260
41261 name = configfs_get_name(next);
41262- len = strlen(name);
41263+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41264+ len = next->s_dentry->d_name.len;
41265+ memcpy(d_name, name, len);
41266+ name = d_name;
41267+ } else
41268+ len = strlen(name);
41269
41270 /*
41271 * We'll have a dentry and an inode for
41272diff --git a/fs/dcache.c b/fs/dcache.c
41273index f7908ae..920a680 100644
41274--- a/fs/dcache.c
41275+++ b/fs/dcache.c
41276@@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41277 mempages -= reserve;
41278
41279 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41280- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41281+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41282
41283 dcache_init();
41284 inode_init();
41285diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41286index 32f90a3..0be89e0 100644
41287--- a/fs/ecryptfs/inode.c
41288+++ b/fs/ecryptfs/inode.c
41289@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41290 old_fs = get_fs();
41291 set_fs(get_ds());
41292 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41293- (char __user *)lower_buf,
41294+ (char __force_user *)lower_buf,
41295 lower_bufsiz);
41296 set_fs(old_fs);
41297 if (rc < 0)
41298@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41299 }
41300 old_fs = get_fs();
41301 set_fs(get_ds());
41302- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41303+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41304 set_fs(old_fs);
41305 if (rc < 0) {
41306 kfree(buf);
41307@@ -752,7 +752,7 @@ out:
41308 static void
41309 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41310 {
41311- char *buf = nd_get_link(nd);
41312+ const char *buf = nd_get_link(nd);
41313 if (!IS_ERR(buf)) {
41314 /* Free the char* */
41315 kfree(buf);
41316diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41317index 940a82e..63af89e 100644
41318--- a/fs/ecryptfs/miscdev.c
41319+++ b/fs/ecryptfs/miscdev.c
41320@@ -328,7 +328,7 @@ check_list:
41321 goto out_unlock_msg_ctx;
41322 i = 5;
41323 if (msg_ctx->msg) {
41324- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41325+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41326 goto out_unlock_msg_ctx;
41327 i += packet_length_size;
41328 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41329diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41330index 3745f7c..89cc7a3 100644
41331--- a/fs/ecryptfs/read_write.c
41332+++ b/fs/ecryptfs/read_write.c
41333@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41334 return -EIO;
41335 fs_save = get_fs();
41336 set_fs(get_ds());
41337- rc = vfs_write(lower_file, data, size, &offset);
41338+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41339 set_fs(fs_save);
41340 mark_inode_dirty_sync(ecryptfs_inode);
41341 return rc;
41342@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41343 return -EIO;
41344 fs_save = get_fs();
41345 set_fs(get_ds());
41346- rc = vfs_read(lower_file, data, size, &offset);
41347+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41348 set_fs(fs_save);
41349 return rc;
41350 }
41351diff --git a/fs/exec.c b/fs/exec.c
41352index 3625464..d08b205 100644
41353--- a/fs/exec.c
41354+++ b/fs/exec.c
41355@@ -55,12 +55,28 @@
41356 #include <linux/pipe_fs_i.h>
41357 #include <linux/oom.h>
41358 #include <linux/compat.h>
41359+#include <linux/random.h>
41360+#include <linux/seq_file.h>
41361+
41362+#ifdef CONFIG_PAX_REFCOUNT
41363+#include <linux/kallsyms.h>
41364+#include <linux/kdebug.h>
41365+#endif
41366
41367 #include <asm/uaccess.h>
41368 #include <asm/mmu_context.h>
41369 #include <asm/tlb.h>
41370 #include "internal.h"
41371
41372+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41373+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41374+#endif
41375+
41376+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41377+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41378+EXPORT_SYMBOL(pax_set_initial_flags_func);
41379+#endif
41380+
41381 int core_uses_pid;
41382 char core_pattern[CORENAME_MAX_SIZE] = "core";
41383 unsigned int core_pipe_limit;
41384@@ -70,7 +86,7 @@ struct core_name {
41385 char *corename;
41386 int used, size;
41387 };
41388-static atomic_t call_count = ATOMIC_INIT(1);
41389+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41390
41391 /* The maximal length of core_pattern is also specified in sysctl.c */
41392
41393@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41394 int write)
41395 {
41396 struct page *page;
41397- int ret;
41398
41399-#ifdef CONFIG_STACK_GROWSUP
41400- if (write) {
41401- ret = expand_downwards(bprm->vma, pos);
41402- if (ret < 0)
41403- return NULL;
41404- }
41405-#endif
41406- ret = get_user_pages(current, bprm->mm, pos,
41407- 1, write, 1, &page, NULL);
41408- if (ret <= 0)
41409+ if (0 > expand_downwards(bprm->vma, pos))
41410+ return NULL;
41411+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41412 return NULL;
41413
41414 if (write) {
41415@@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41416 vma->vm_end = STACK_TOP_MAX;
41417 vma->vm_start = vma->vm_end - PAGE_SIZE;
41418 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41419+
41420+#ifdef CONFIG_PAX_SEGMEXEC
41421+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41422+#endif
41423+
41424 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41425 INIT_LIST_HEAD(&vma->anon_vma_chain);
41426
41427@@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41428 mm->stack_vm = mm->total_vm = 1;
41429 up_write(&mm->mmap_sem);
41430 bprm->p = vma->vm_end - sizeof(void *);
41431+
41432+#ifdef CONFIG_PAX_RANDUSTACK
41433+ if (randomize_va_space)
41434+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41435+#endif
41436+
41437 return 0;
41438 err:
41439 up_write(&mm->mmap_sem);
41440@@ -396,19 +415,7 @@ err:
41441 return err;
41442 }
41443
41444-struct user_arg_ptr {
41445-#ifdef CONFIG_COMPAT
41446- bool is_compat;
41447-#endif
41448- union {
41449- const char __user *const __user *native;
41450-#ifdef CONFIG_COMPAT
41451- compat_uptr_t __user *compat;
41452-#endif
41453- } ptr;
41454-};
41455-
41456-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41457+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41458 {
41459 const char __user *native;
41460
41461@@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41462 compat_uptr_t compat;
41463
41464 if (get_user(compat, argv.ptr.compat + nr))
41465- return ERR_PTR(-EFAULT);
41466+ return (const char __force_user *)ERR_PTR(-EFAULT);
41467
41468 return compat_ptr(compat);
41469 }
41470 #endif
41471
41472 if (get_user(native, argv.ptr.native + nr))
41473- return ERR_PTR(-EFAULT);
41474+ return (const char __force_user *)ERR_PTR(-EFAULT);
41475
41476 return native;
41477 }
41478@@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
41479 if (!p)
41480 break;
41481
41482- if (IS_ERR(p))
41483+ if (IS_ERR((const char __force_kernel *)p))
41484 return -EFAULT;
41485
41486 if (i++ >= max)
41487@@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41488
41489 ret = -EFAULT;
41490 str = get_user_arg_ptr(argv, argc);
41491- if (IS_ERR(str))
41492+ if (IS_ERR((const char __force_kernel *)str))
41493 goto out;
41494
41495 len = strnlen_user(str, MAX_ARG_STRLEN);
41496@@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41497 int r;
41498 mm_segment_t oldfs = get_fs();
41499 struct user_arg_ptr argv = {
41500- .ptr.native = (const char __user *const __user *)__argv,
41501+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41502 };
41503
41504 set_fs(KERNEL_DS);
41505@@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41506 unsigned long new_end = old_end - shift;
41507 struct mmu_gather tlb;
41508
41509- BUG_ON(new_start > new_end);
41510+ if (new_start >= new_end || new_start < mmap_min_addr)
41511+ return -ENOMEM;
41512
41513 /*
41514 * ensure there are no vmas between where we want to go
41515@@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41516 if (vma != find_vma(mm, new_start))
41517 return -EFAULT;
41518
41519+#ifdef CONFIG_PAX_SEGMEXEC
41520+ BUG_ON(pax_find_mirror_vma(vma));
41521+#endif
41522+
41523 /*
41524 * cover the whole range: [new_start, old_end)
41525 */
41526@@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41527 stack_top = arch_align_stack(stack_top);
41528 stack_top = PAGE_ALIGN(stack_top);
41529
41530- if (unlikely(stack_top < mmap_min_addr) ||
41531- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41532- return -ENOMEM;
41533-
41534 stack_shift = vma->vm_end - stack_top;
41535
41536 bprm->p -= stack_shift;
41537@@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41538 bprm->exec -= stack_shift;
41539
41540 down_write(&mm->mmap_sem);
41541+
41542+ /* Move stack pages down in memory. */
41543+ if (stack_shift) {
41544+ ret = shift_arg_pages(vma, stack_shift);
41545+ if (ret)
41546+ goto out_unlock;
41547+ }
41548+
41549 vm_flags = VM_STACK_FLAGS;
41550
41551+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41552+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41553+ vm_flags &= ~VM_EXEC;
41554+
41555+#ifdef CONFIG_PAX_MPROTECT
41556+ if (mm->pax_flags & MF_PAX_MPROTECT)
41557+ vm_flags &= ~VM_MAYEXEC;
41558+#endif
41559+
41560+ }
41561+#endif
41562+
41563 /*
41564 * Adjust stack execute permissions; explicitly enable for
41565 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41566@@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41567 goto out_unlock;
41568 BUG_ON(prev != vma);
41569
41570- /* Move stack pages down in memory. */
41571- if (stack_shift) {
41572- ret = shift_arg_pages(vma, stack_shift);
41573- if (ret)
41574- goto out_unlock;
41575- }
41576-
41577 /* mprotect_fixup is overkill to remove the temporary stack flags */
41578 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41579
41580@@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
41581 old_fs = get_fs();
41582 set_fs(get_ds());
41583 /* The cast to a user pointer is valid due to the set_fs() */
41584- result = vfs_read(file, (void __user *)addr, count, &pos);
41585+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41586 set_fs(old_fs);
41587 return result;
41588 }
41589@@ -1247,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41590 }
41591 rcu_read_unlock();
41592
41593- if (p->fs->users > n_fs) {
41594+ if (atomic_read(&p->fs->users) > n_fs) {
41595 bprm->unsafe |= LSM_UNSAFE_SHARE;
41596 } else {
41597 res = -EAGAIN;
41598@@ -1450,6 +1471,11 @@ static int do_execve_common(const char *filename,
41599 struct user_arg_ptr envp,
41600 struct pt_regs *regs)
41601 {
41602+#ifdef CONFIG_GRKERNSEC
41603+ struct file *old_exec_file;
41604+ struct acl_subject_label *old_acl;
41605+ struct rlimit old_rlim[RLIM_NLIMITS];
41606+#endif
41607 struct linux_binprm *bprm;
41608 struct file *file;
41609 struct files_struct *displaced;
41610@@ -1457,6 +1483,8 @@ static int do_execve_common(const char *filename,
41611 int retval;
41612 const struct cred *cred = current_cred();
41613
41614+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41615+
41616 /*
41617 * We move the actual failure in case of RLIMIT_NPROC excess from
41618 * set*uid() to execve() because too many poorly written programs
41619@@ -1497,12 +1525,27 @@ static int do_execve_common(const char *filename,
41620 if (IS_ERR(file))
41621 goto out_unmark;
41622
41623+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
41624+ retval = -EPERM;
41625+ goto out_file;
41626+ }
41627+
41628 sched_exec();
41629
41630 bprm->file = file;
41631 bprm->filename = filename;
41632 bprm->interp = filename;
41633
41634+ if (gr_process_user_ban()) {
41635+ retval = -EPERM;
41636+ goto out_file;
41637+ }
41638+
41639+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41640+ retval = -EACCES;
41641+ goto out_file;
41642+ }
41643+
41644 retval = bprm_mm_init(bprm);
41645 if (retval)
41646 goto out_file;
41647@@ -1532,9 +1575,40 @@ static int do_execve_common(const char *filename,
41648 if (retval < 0)
41649 goto out;
41650
41651+ if (!gr_tpe_allow(file)) {
41652+ retval = -EACCES;
41653+ goto out;
41654+ }
41655+
41656+ if (gr_check_crash_exec(file)) {
41657+ retval = -EACCES;
41658+ goto out;
41659+ }
41660+
41661+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41662+
41663+ gr_handle_exec_args(bprm, argv);
41664+
41665+#ifdef CONFIG_GRKERNSEC
41666+ old_acl = current->acl;
41667+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41668+ old_exec_file = current->exec_file;
41669+ get_file(file);
41670+ current->exec_file = file;
41671+#endif
41672+
41673+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41674+ bprm->unsafe & LSM_UNSAFE_SHARE);
41675+ if (retval < 0)
41676+ goto out_fail;
41677+
41678 retval = search_binary_handler(bprm,regs);
41679 if (retval < 0)
41680- goto out;
41681+ goto out_fail;
41682+#ifdef CONFIG_GRKERNSEC
41683+ if (old_exec_file)
41684+ fput(old_exec_file);
41685+#endif
41686
41687 /* execve succeeded */
41688 current->fs->in_exec = 0;
41689@@ -1545,6 +1619,14 @@ static int do_execve_common(const char *filename,
41690 put_files_struct(displaced);
41691 return retval;
41692
41693+out_fail:
41694+#ifdef CONFIG_GRKERNSEC
41695+ current->acl = old_acl;
41696+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41697+ fput(current->exec_file);
41698+ current->exec_file = old_exec_file;
41699+#endif
41700+
41701 out:
41702 if (bprm->mm) {
41703 acct_arg_size(bprm, 0);
41704@@ -1618,7 +1700,7 @@ static int expand_corename(struct core_name *cn)
41705 {
41706 char *old_corename = cn->corename;
41707
41708- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41709+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41710 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41711
41712 if (!cn->corename) {
41713@@ -1715,7 +1797,7 @@ static int format_corename(struct core_name *cn, long signr)
41714 int pid_in_pattern = 0;
41715 int err = 0;
41716
41717- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41718+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41719 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41720 cn->used = 0;
41721
41722@@ -1812,6 +1894,218 @@ out:
41723 return ispipe;
41724 }
41725
41726+int pax_check_flags(unsigned long *flags)
41727+{
41728+ int retval = 0;
41729+
41730+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41731+ if (*flags & MF_PAX_SEGMEXEC)
41732+ {
41733+ *flags &= ~MF_PAX_SEGMEXEC;
41734+ retval = -EINVAL;
41735+ }
41736+#endif
41737+
41738+ if ((*flags & MF_PAX_PAGEEXEC)
41739+
41740+#ifdef CONFIG_PAX_PAGEEXEC
41741+ && (*flags & MF_PAX_SEGMEXEC)
41742+#endif
41743+
41744+ )
41745+ {
41746+ *flags &= ~MF_PAX_PAGEEXEC;
41747+ retval = -EINVAL;
41748+ }
41749+
41750+ if ((*flags & MF_PAX_MPROTECT)
41751+
41752+#ifdef CONFIG_PAX_MPROTECT
41753+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41754+#endif
41755+
41756+ )
41757+ {
41758+ *flags &= ~MF_PAX_MPROTECT;
41759+ retval = -EINVAL;
41760+ }
41761+
41762+ if ((*flags & MF_PAX_EMUTRAMP)
41763+
41764+#ifdef CONFIG_PAX_EMUTRAMP
41765+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41766+#endif
41767+
41768+ )
41769+ {
41770+ *flags &= ~MF_PAX_EMUTRAMP;
41771+ retval = -EINVAL;
41772+ }
41773+
41774+ return retval;
41775+}
41776+
41777+EXPORT_SYMBOL(pax_check_flags);
41778+
41779+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41780+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41781+{
41782+ struct task_struct *tsk = current;
41783+ struct mm_struct *mm = current->mm;
41784+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41785+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41786+ char *path_exec = NULL;
41787+ char *path_fault = NULL;
41788+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41789+
41790+ if (buffer_exec && buffer_fault) {
41791+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41792+
41793+ down_read(&mm->mmap_sem);
41794+ vma = mm->mmap;
41795+ while (vma && (!vma_exec || !vma_fault)) {
41796+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41797+ vma_exec = vma;
41798+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41799+ vma_fault = vma;
41800+ vma = vma->vm_next;
41801+ }
41802+ if (vma_exec) {
41803+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41804+ if (IS_ERR(path_exec))
41805+ path_exec = "<path too long>";
41806+ else {
41807+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41808+ if (path_exec) {
41809+ *path_exec = 0;
41810+ path_exec = buffer_exec;
41811+ } else
41812+ path_exec = "<path too long>";
41813+ }
41814+ }
41815+ if (vma_fault) {
41816+ start = vma_fault->vm_start;
41817+ end = vma_fault->vm_end;
41818+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41819+ if (vma_fault->vm_file) {
41820+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41821+ if (IS_ERR(path_fault))
41822+ path_fault = "<path too long>";
41823+ else {
41824+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41825+ if (path_fault) {
41826+ *path_fault = 0;
41827+ path_fault = buffer_fault;
41828+ } else
41829+ path_fault = "<path too long>";
41830+ }
41831+ } else
41832+ path_fault = "<anonymous mapping>";
41833+ }
41834+ up_read(&mm->mmap_sem);
41835+ }
41836+ if (tsk->signal->curr_ip)
41837+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41838+ else
41839+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41840+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41841+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41842+ task_uid(tsk), task_euid(tsk), pc, sp);
41843+ free_page((unsigned long)buffer_exec);
41844+ free_page((unsigned long)buffer_fault);
41845+ pax_report_insns(regs, pc, sp);
41846+ do_coredump(SIGKILL, SIGKILL, regs);
41847+}
41848+#endif
41849+
41850+#ifdef CONFIG_PAX_REFCOUNT
41851+void pax_report_refcount_overflow(struct pt_regs *regs)
41852+{
41853+ if (current->signal->curr_ip)
41854+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41855+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41856+ else
41857+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41858+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41859+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41860+ show_regs(regs);
41861+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41862+}
41863+#endif
41864+
41865+#ifdef CONFIG_PAX_USERCOPY
41866+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41867+int object_is_on_stack(const void *obj, unsigned long len)
41868+{
41869+ const void * const stack = task_stack_page(current);
41870+ const void * const stackend = stack + THREAD_SIZE;
41871+
41872+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41873+ const void *frame = NULL;
41874+ const void *oldframe;
41875+#endif
41876+
41877+ if (obj + len < obj)
41878+ return -1;
41879+
41880+ if (obj + len <= stack || stackend <= obj)
41881+ return 0;
41882+
41883+ if (obj < stack || stackend < obj + len)
41884+ return -1;
41885+
41886+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41887+ oldframe = __builtin_frame_address(1);
41888+ if (oldframe)
41889+ frame = __builtin_frame_address(2);
41890+ /*
41891+ low ----------------------------------------------> high
41892+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41893+ ^----------------^
41894+ allow copies only within here
41895+ */
41896+ while (stack <= frame && frame < stackend) {
41897+ /* if obj + len extends past the last frame, this
41898+ check won't pass and the next frame will be 0,
41899+ causing us to bail out and correctly report
41900+ the copy as invalid
41901+ */
41902+ if (obj + len <= frame)
41903+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41904+ oldframe = frame;
41905+ frame = *(const void * const *)frame;
41906+ }
41907+ return -1;
41908+#else
41909+ return 1;
41910+#endif
41911+}
41912+
41913+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41914+{
41915+ if (current->signal->curr_ip)
41916+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41917+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41918+ else
41919+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41920+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41921+ dump_stack();
41922+ gr_handle_kernel_exploit();
41923+ do_group_exit(SIGKILL);
41924+}
41925+#endif
41926+
41927+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41928+void pax_track_stack(void)
41929+{
41930+ unsigned long sp = (unsigned long)&sp;
41931+ if (sp < current_thread_info()->lowest_stack &&
41932+ sp > (unsigned long)task_stack_page(current))
41933+ current_thread_info()->lowest_stack = sp;
41934+}
41935+EXPORT_SYMBOL(pax_track_stack);
41936+#endif
41937+
41938 static int zap_process(struct task_struct *start, int exit_code)
41939 {
41940 struct task_struct *t;
41941@@ -2023,17 +2317,17 @@ static void wait_for_dump_helpers(struct file *file)
41942 pipe = file->f_path.dentry->d_inode->i_pipe;
41943
41944 pipe_lock(pipe);
41945- pipe->readers++;
41946- pipe->writers--;
41947+ atomic_inc(&pipe->readers);
41948+ atomic_dec(&pipe->writers);
41949
41950- while ((pipe->readers > 1) && (!signal_pending(current))) {
41951+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41952 wake_up_interruptible_sync(&pipe->wait);
41953 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41954 pipe_wait(pipe);
41955 }
41956
41957- pipe->readers--;
41958- pipe->writers++;
41959+ atomic_dec(&pipe->readers);
41960+ atomic_inc(&pipe->writers);
41961 pipe_unlock(pipe);
41962
41963 }
41964@@ -2094,7 +2388,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
41965 int retval = 0;
41966 int flag = 0;
41967 int ispipe;
41968- static atomic_t core_dump_count = ATOMIC_INIT(0);
41969+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41970 struct coredump_params cprm = {
41971 .signr = signr,
41972 .regs = regs,
41973@@ -2109,6 +2403,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
41974
41975 audit_core_dumps(signr);
41976
41977+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41978+ gr_handle_brute_attach(current, cprm.mm_flags);
41979+
41980 binfmt = mm->binfmt;
41981 if (!binfmt || !binfmt->core_dump)
41982 goto fail;
41983@@ -2176,7 +2473,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
41984 }
41985 cprm.limit = RLIM_INFINITY;
41986
41987- dump_count = atomic_inc_return(&core_dump_count);
41988+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41989 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41990 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41991 task_tgid_vnr(current), current->comm);
41992@@ -2203,6 +2500,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
41993 } else {
41994 struct inode *inode;
41995
41996+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41997+
41998 if (cprm.limit < binfmt->min_coredump)
41999 goto fail_unlock;
42000
42001@@ -2246,7 +2545,7 @@ close_fail:
42002 filp_close(cprm.file, NULL);
42003 fail_dropcount:
42004 if (ispipe)
42005- atomic_dec(&core_dump_count);
42006+ atomic_dec_unchecked(&core_dump_count);
42007 fail_unlock:
42008 kfree(cn.corename);
42009 fail_corename:
42010@@ -2265,7 +2564,7 @@ fail:
42011 */
42012 int dump_write(struct file *file, const void *addr, int nr)
42013 {
42014- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42015+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42016 }
42017 EXPORT_SYMBOL(dump_write);
42018
42019diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42020index a8cbe1b..fed04cb 100644
42021--- a/fs/ext2/balloc.c
42022+++ b/fs/ext2/balloc.c
42023@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42024
42025 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42026 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42027- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42028+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42029 sbi->s_resuid != current_fsuid() &&
42030 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42031 return 0;
42032diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42033index a203892..4e64db5 100644
42034--- a/fs/ext3/balloc.c
42035+++ b/fs/ext3/balloc.c
42036@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42037
42038 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42039 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42040- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42041+ if (free_blocks < root_blocks + 1 &&
42042 !use_reservation && sbi->s_resuid != current_fsuid() &&
42043- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42044+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42045+ !capable_nolog(CAP_SYS_RESOURCE)) {
42046 return 0;
42047 }
42048 return 1;
42049diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42050index 12ccacd..a6035fce0 100644
42051--- a/fs/ext4/balloc.c
42052+++ b/fs/ext4/balloc.c
42053@@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42054 /* Hm, nope. Are (enough) root reserved clusters available? */
42055 if (sbi->s_resuid == current_fsuid() ||
42056 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42057- capable(CAP_SYS_RESOURCE) ||
42058- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42059+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42060+ capable_nolog(CAP_SYS_RESOURCE)) {
42061
42062 if (free_clusters >= (nclusters + dirty_clusters))
42063 return 1;
42064diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42065index 5b0e26a..0aa002d 100644
42066--- a/fs/ext4/ext4.h
42067+++ b/fs/ext4/ext4.h
42068@@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42069 unsigned long s_mb_last_start;
42070
42071 /* stats for buddy allocator */
42072- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42073- atomic_t s_bal_success; /* we found long enough chunks */
42074- atomic_t s_bal_allocated; /* in blocks */
42075- atomic_t s_bal_ex_scanned; /* total extents scanned */
42076- atomic_t s_bal_goals; /* goal hits */
42077- atomic_t s_bal_breaks; /* too long searches */
42078- atomic_t s_bal_2orders; /* 2^order hits */
42079+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42080+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42081+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42082+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42083+ atomic_unchecked_t s_bal_goals; /* goal hits */
42084+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42085+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42086 spinlock_t s_bal_lock;
42087 unsigned long s_mb_buddies_generated;
42088 unsigned long long s_mb_generation_time;
42089- atomic_t s_mb_lost_chunks;
42090- atomic_t s_mb_preallocated;
42091- atomic_t s_mb_discarded;
42092+ atomic_unchecked_t s_mb_lost_chunks;
42093+ atomic_unchecked_t s_mb_preallocated;
42094+ atomic_unchecked_t s_mb_discarded;
42095 atomic_t s_lock_busy;
42096
42097 /* locality groups */
42098diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42099index e2d8be8..c7f0ce9 100644
42100--- a/fs/ext4/mballoc.c
42101+++ b/fs/ext4/mballoc.c
42102@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42103 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42104
42105 if (EXT4_SB(sb)->s_mb_stats)
42106- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42107+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42108
42109 break;
42110 }
42111@@ -2088,7 +2088,7 @@ repeat:
42112 ac->ac_status = AC_STATUS_CONTINUE;
42113 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42114 cr = 3;
42115- atomic_inc(&sbi->s_mb_lost_chunks);
42116+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42117 goto repeat;
42118 }
42119 }
42120@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42121 if (sbi->s_mb_stats) {
42122 ext4_msg(sb, KERN_INFO,
42123 "mballoc: %u blocks %u reqs (%u success)",
42124- atomic_read(&sbi->s_bal_allocated),
42125- atomic_read(&sbi->s_bal_reqs),
42126- atomic_read(&sbi->s_bal_success));
42127+ atomic_read_unchecked(&sbi->s_bal_allocated),
42128+ atomic_read_unchecked(&sbi->s_bal_reqs),
42129+ atomic_read_unchecked(&sbi->s_bal_success));
42130 ext4_msg(sb, KERN_INFO,
42131 "mballoc: %u extents scanned, %u goal hits, "
42132 "%u 2^N hits, %u breaks, %u lost",
42133- atomic_read(&sbi->s_bal_ex_scanned),
42134- atomic_read(&sbi->s_bal_goals),
42135- atomic_read(&sbi->s_bal_2orders),
42136- atomic_read(&sbi->s_bal_breaks),
42137- atomic_read(&sbi->s_mb_lost_chunks));
42138+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42139+ atomic_read_unchecked(&sbi->s_bal_goals),
42140+ atomic_read_unchecked(&sbi->s_bal_2orders),
42141+ atomic_read_unchecked(&sbi->s_bal_breaks),
42142+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42143 ext4_msg(sb, KERN_INFO,
42144 "mballoc: %lu generated and it took %Lu",
42145 sbi->s_mb_buddies_generated,
42146 sbi->s_mb_generation_time);
42147 ext4_msg(sb, KERN_INFO,
42148 "mballoc: %u preallocated, %u discarded",
42149- atomic_read(&sbi->s_mb_preallocated),
42150- atomic_read(&sbi->s_mb_discarded));
42151+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42152+ atomic_read_unchecked(&sbi->s_mb_discarded));
42153 }
42154
42155 free_percpu(sbi->s_locality_groups);
42156@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42157 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42158
42159 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42160- atomic_inc(&sbi->s_bal_reqs);
42161- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42162+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42163+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42164 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42165- atomic_inc(&sbi->s_bal_success);
42166- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42167+ atomic_inc_unchecked(&sbi->s_bal_success);
42168+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42169 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42170 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42171- atomic_inc(&sbi->s_bal_goals);
42172+ atomic_inc_unchecked(&sbi->s_bal_goals);
42173 if (ac->ac_found > sbi->s_mb_max_to_scan)
42174- atomic_inc(&sbi->s_bal_breaks);
42175+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42176 }
42177
42178 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42179@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42180 trace_ext4_mb_new_inode_pa(ac, pa);
42181
42182 ext4_mb_use_inode_pa(ac, pa);
42183- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42184+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42185
42186 ei = EXT4_I(ac->ac_inode);
42187 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42188@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42189 trace_ext4_mb_new_group_pa(ac, pa);
42190
42191 ext4_mb_use_group_pa(ac, pa);
42192- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42193+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42194
42195 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42196 lg = ac->ac_lg;
42197@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42198 * from the bitmap and continue.
42199 */
42200 }
42201- atomic_add(free, &sbi->s_mb_discarded);
42202+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42203
42204 return err;
42205 }
42206@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42207 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42208 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42209 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42210- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42211+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42212 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42213
42214 return 0;
42215diff --git a/fs/fcntl.c b/fs/fcntl.c
42216index 22764c7..86372c9 100644
42217--- a/fs/fcntl.c
42218+++ b/fs/fcntl.c
42219@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42220 if (err)
42221 return err;
42222
42223+ if (gr_handle_chroot_fowner(pid, type))
42224+ return -ENOENT;
42225+ if (gr_check_protected_task_fowner(pid, type))
42226+ return -EACCES;
42227+
42228 f_modown(filp, pid, type, force);
42229 return 0;
42230 }
42231@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42232
42233 static int f_setown_ex(struct file *filp, unsigned long arg)
42234 {
42235- struct f_owner_ex * __user owner_p = (void * __user)arg;
42236+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42237 struct f_owner_ex owner;
42238 struct pid *pid;
42239 int type;
42240@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42241
42242 static int f_getown_ex(struct file *filp, unsigned long arg)
42243 {
42244- struct f_owner_ex * __user owner_p = (void * __user)arg;
42245+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42246 struct f_owner_ex owner;
42247 int ret = 0;
42248
42249@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42250 switch (cmd) {
42251 case F_DUPFD:
42252 case F_DUPFD_CLOEXEC:
42253+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42254 if (arg >= rlimit(RLIMIT_NOFILE))
42255 break;
42256 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42257diff --git a/fs/fifo.c b/fs/fifo.c
42258index b1a524d..4ee270e 100644
42259--- a/fs/fifo.c
42260+++ b/fs/fifo.c
42261@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42262 */
42263 filp->f_op = &read_pipefifo_fops;
42264 pipe->r_counter++;
42265- if (pipe->readers++ == 0)
42266+ if (atomic_inc_return(&pipe->readers) == 1)
42267 wake_up_partner(inode);
42268
42269- if (!pipe->writers) {
42270+ if (!atomic_read(&pipe->writers)) {
42271 if ((filp->f_flags & O_NONBLOCK)) {
42272 /* suppress POLLHUP until we have
42273 * seen a writer */
42274@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42275 * errno=ENXIO when there is no process reading the FIFO.
42276 */
42277 ret = -ENXIO;
42278- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42279+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42280 goto err;
42281
42282 filp->f_op = &write_pipefifo_fops;
42283 pipe->w_counter++;
42284- if (!pipe->writers++)
42285+ if (atomic_inc_return(&pipe->writers) == 1)
42286 wake_up_partner(inode);
42287
42288- if (!pipe->readers) {
42289+ if (!atomic_read(&pipe->readers)) {
42290 wait_for_partner(inode, &pipe->r_counter);
42291 if (signal_pending(current))
42292 goto err_wr;
42293@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42294 */
42295 filp->f_op = &rdwr_pipefifo_fops;
42296
42297- pipe->readers++;
42298- pipe->writers++;
42299+ atomic_inc(&pipe->readers);
42300+ atomic_inc(&pipe->writers);
42301 pipe->r_counter++;
42302 pipe->w_counter++;
42303- if (pipe->readers == 1 || pipe->writers == 1)
42304+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42305 wake_up_partner(inode);
42306 break;
42307
42308@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42309 return 0;
42310
42311 err_rd:
42312- if (!--pipe->readers)
42313+ if (atomic_dec_and_test(&pipe->readers))
42314 wake_up_interruptible(&pipe->wait);
42315 ret = -ERESTARTSYS;
42316 goto err;
42317
42318 err_wr:
42319- if (!--pipe->writers)
42320+ if (atomic_dec_and_test(&pipe->writers))
42321 wake_up_interruptible(&pipe->wait);
42322 ret = -ERESTARTSYS;
42323 goto err;
42324
42325 err:
42326- if (!pipe->readers && !pipe->writers)
42327+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42328 free_pipe_info(inode);
42329
42330 err_nocleanup:
42331diff --git a/fs/file.c b/fs/file.c
42332index 4c6992d..104cdea 100644
42333--- a/fs/file.c
42334+++ b/fs/file.c
42335@@ -15,6 +15,7 @@
42336 #include <linux/slab.h>
42337 #include <linux/vmalloc.h>
42338 #include <linux/file.h>
42339+#include <linux/security.h>
42340 #include <linux/fdtable.h>
42341 #include <linux/bitops.h>
42342 #include <linux/interrupt.h>
42343@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42344 * N.B. For clone tasks sharing a files structure, this test
42345 * will limit the total number of files that can be opened.
42346 */
42347+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42348 if (nr >= rlimit(RLIMIT_NOFILE))
42349 return -EMFILE;
42350
42351diff --git a/fs/filesystems.c b/fs/filesystems.c
42352index 0845f84..7b4ebef 100644
42353--- a/fs/filesystems.c
42354+++ b/fs/filesystems.c
42355@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42356 int len = dot ? dot - name : strlen(name);
42357
42358 fs = __get_fs_type(name, len);
42359+
42360+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42361+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42362+#else
42363 if (!fs && (request_module("%.*s", len, name) == 0))
42364+#endif
42365 fs = __get_fs_type(name, len);
42366
42367 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42368diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42369index 78b519c..212c0d0 100644
42370--- a/fs/fs_struct.c
42371+++ b/fs/fs_struct.c
42372@@ -4,6 +4,7 @@
42373 #include <linux/path.h>
42374 #include <linux/slab.h>
42375 #include <linux/fs_struct.h>
42376+#include <linux/grsecurity.h>
42377 #include "internal.h"
42378
42379 static inline void path_get_longterm(struct path *path)
42380@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42381 old_root = fs->root;
42382 fs->root = *path;
42383 path_get_longterm(path);
42384+ gr_set_chroot_entries(current, path);
42385 write_seqcount_end(&fs->seq);
42386 spin_unlock(&fs->lock);
42387 if (old_root.dentry)
42388@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42389 && fs->root.mnt == old_root->mnt) {
42390 path_get_longterm(new_root);
42391 fs->root = *new_root;
42392+ gr_set_chroot_entries(p, new_root);
42393 count++;
42394 }
42395 if (fs->pwd.dentry == old_root->dentry
42396@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42397 spin_lock(&fs->lock);
42398 write_seqcount_begin(&fs->seq);
42399 tsk->fs = NULL;
42400- kill = !--fs->users;
42401+ gr_clear_chroot_entries(tsk);
42402+ kill = !atomic_dec_return(&fs->users);
42403 write_seqcount_end(&fs->seq);
42404 spin_unlock(&fs->lock);
42405 task_unlock(tsk);
42406@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42407 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42408 /* We don't need to lock fs - think why ;-) */
42409 if (fs) {
42410- fs->users = 1;
42411+ atomic_set(&fs->users, 1);
42412 fs->in_exec = 0;
42413 spin_lock_init(&fs->lock);
42414 seqcount_init(&fs->seq);
42415@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42416 spin_lock(&old->lock);
42417 fs->root = old->root;
42418 path_get_longterm(&fs->root);
42419+ /* instead of calling gr_set_chroot_entries here,
42420+ we call it from every caller of this function
42421+ */
42422 fs->pwd = old->pwd;
42423 path_get_longterm(&fs->pwd);
42424 spin_unlock(&old->lock);
42425@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42426
42427 task_lock(current);
42428 spin_lock(&fs->lock);
42429- kill = !--fs->users;
42430+ kill = !atomic_dec_return(&fs->users);
42431 current->fs = new_fs;
42432+ gr_set_chroot_entries(current, &new_fs->root);
42433 spin_unlock(&fs->lock);
42434 task_unlock(current);
42435
42436@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42437
42438 /* to be mentioned only in INIT_TASK */
42439 struct fs_struct init_fs = {
42440- .users = 1,
42441+ .users = ATOMIC_INIT(1),
42442 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42443 .seq = SEQCNT_ZERO,
42444 .umask = 0022,
42445@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42446 task_lock(current);
42447
42448 spin_lock(&init_fs.lock);
42449- init_fs.users++;
42450+ atomic_inc(&init_fs.users);
42451 spin_unlock(&init_fs.lock);
42452
42453 spin_lock(&fs->lock);
42454 current->fs = &init_fs;
42455- kill = !--fs->users;
42456+ gr_set_chroot_entries(current, &current->fs->root);
42457+ kill = !atomic_dec_return(&fs->users);
42458 spin_unlock(&fs->lock);
42459
42460 task_unlock(current);
42461diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42462index 9905350..02eaec4 100644
42463--- a/fs/fscache/cookie.c
42464+++ b/fs/fscache/cookie.c
42465@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42466 parent ? (char *) parent->def->name : "<no-parent>",
42467 def->name, netfs_data);
42468
42469- fscache_stat(&fscache_n_acquires);
42470+ fscache_stat_unchecked(&fscache_n_acquires);
42471
42472 /* if there's no parent cookie, then we don't create one here either */
42473 if (!parent) {
42474- fscache_stat(&fscache_n_acquires_null);
42475+ fscache_stat_unchecked(&fscache_n_acquires_null);
42476 _leave(" [no parent]");
42477 return NULL;
42478 }
42479@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42480 /* allocate and initialise a cookie */
42481 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42482 if (!cookie) {
42483- fscache_stat(&fscache_n_acquires_oom);
42484+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42485 _leave(" [ENOMEM]");
42486 return NULL;
42487 }
42488@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42489
42490 switch (cookie->def->type) {
42491 case FSCACHE_COOKIE_TYPE_INDEX:
42492- fscache_stat(&fscache_n_cookie_index);
42493+ fscache_stat_unchecked(&fscache_n_cookie_index);
42494 break;
42495 case FSCACHE_COOKIE_TYPE_DATAFILE:
42496- fscache_stat(&fscache_n_cookie_data);
42497+ fscache_stat_unchecked(&fscache_n_cookie_data);
42498 break;
42499 default:
42500- fscache_stat(&fscache_n_cookie_special);
42501+ fscache_stat_unchecked(&fscache_n_cookie_special);
42502 break;
42503 }
42504
42505@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42506 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42507 atomic_dec(&parent->n_children);
42508 __fscache_cookie_put(cookie);
42509- fscache_stat(&fscache_n_acquires_nobufs);
42510+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42511 _leave(" = NULL");
42512 return NULL;
42513 }
42514 }
42515
42516- fscache_stat(&fscache_n_acquires_ok);
42517+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42518 _leave(" = %p", cookie);
42519 return cookie;
42520 }
42521@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42522 cache = fscache_select_cache_for_object(cookie->parent);
42523 if (!cache) {
42524 up_read(&fscache_addremove_sem);
42525- fscache_stat(&fscache_n_acquires_no_cache);
42526+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42527 _leave(" = -ENOMEDIUM [no cache]");
42528 return -ENOMEDIUM;
42529 }
42530@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42531 object = cache->ops->alloc_object(cache, cookie);
42532 fscache_stat_d(&fscache_n_cop_alloc_object);
42533 if (IS_ERR(object)) {
42534- fscache_stat(&fscache_n_object_no_alloc);
42535+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42536 ret = PTR_ERR(object);
42537 goto error;
42538 }
42539
42540- fscache_stat(&fscache_n_object_alloc);
42541+ fscache_stat_unchecked(&fscache_n_object_alloc);
42542
42543 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42544
42545@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42546 struct fscache_object *object;
42547 struct hlist_node *_p;
42548
42549- fscache_stat(&fscache_n_updates);
42550+ fscache_stat_unchecked(&fscache_n_updates);
42551
42552 if (!cookie) {
42553- fscache_stat(&fscache_n_updates_null);
42554+ fscache_stat_unchecked(&fscache_n_updates_null);
42555 _leave(" [no cookie]");
42556 return;
42557 }
42558@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42559 struct fscache_object *object;
42560 unsigned long event;
42561
42562- fscache_stat(&fscache_n_relinquishes);
42563+ fscache_stat_unchecked(&fscache_n_relinquishes);
42564 if (retire)
42565- fscache_stat(&fscache_n_relinquishes_retire);
42566+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42567
42568 if (!cookie) {
42569- fscache_stat(&fscache_n_relinquishes_null);
42570+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42571 _leave(" [no cookie]");
42572 return;
42573 }
42574@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42575
42576 /* wait for the cookie to finish being instantiated (or to fail) */
42577 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42578- fscache_stat(&fscache_n_relinquishes_waitcrt);
42579+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42580 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42581 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42582 }
42583diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42584index f6aad48..88dcf26 100644
42585--- a/fs/fscache/internal.h
42586+++ b/fs/fscache/internal.h
42587@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42588 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42589 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42590
42591-extern atomic_t fscache_n_op_pend;
42592-extern atomic_t fscache_n_op_run;
42593-extern atomic_t fscache_n_op_enqueue;
42594-extern atomic_t fscache_n_op_deferred_release;
42595-extern atomic_t fscache_n_op_release;
42596-extern atomic_t fscache_n_op_gc;
42597-extern atomic_t fscache_n_op_cancelled;
42598-extern atomic_t fscache_n_op_rejected;
42599+extern atomic_unchecked_t fscache_n_op_pend;
42600+extern atomic_unchecked_t fscache_n_op_run;
42601+extern atomic_unchecked_t fscache_n_op_enqueue;
42602+extern atomic_unchecked_t fscache_n_op_deferred_release;
42603+extern atomic_unchecked_t fscache_n_op_release;
42604+extern atomic_unchecked_t fscache_n_op_gc;
42605+extern atomic_unchecked_t fscache_n_op_cancelled;
42606+extern atomic_unchecked_t fscache_n_op_rejected;
42607
42608-extern atomic_t fscache_n_attr_changed;
42609-extern atomic_t fscache_n_attr_changed_ok;
42610-extern atomic_t fscache_n_attr_changed_nobufs;
42611-extern atomic_t fscache_n_attr_changed_nomem;
42612-extern atomic_t fscache_n_attr_changed_calls;
42613+extern atomic_unchecked_t fscache_n_attr_changed;
42614+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42615+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42616+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42617+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42618
42619-extern atomic_t fscache_n_allocs;
42620-extern atomic_t fscache_n_allocs_ok;
42621-extern atomic_t fscache_n_allocs_wait;
42622-extern atomic_t fscache_n_allocs_nobufs;
42623-extern atomic_t fscache_n_allocs_intr;
42624-extern atomic_t fscache_n_allocs_object_dead;
42625-extern atomic_t fscache_n_alloc_ops;
42626-extern atomic_t fscache_n_alloc_op_waits;
42627+extern atomic_unchecked_t fscache_n_allocs;
42628+extern atomic_unchecked_t fscache_n_allocs_ok;
42629+extern atomic_unchecked_t fscache_n_allocs_wait;
42630+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42631+extern atomic_unchecked_t fscache_n_allocs_intr;
42632+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42633+extern atomic_unchecked_t fscache_n_alloc_ops;
42634+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42635
42636-extern atomic_t fscache_n_retrievals;
42637-extern atomic_t fscache_n_retrievals_ok;
42638-extern atomic_t fscache_n_retrievals_wait;
42639-extern atomic_t fscache_n_retrievals_nodata;
42640-extern atomic_t fscache_n_retrievals_nobufs;
42641-extern atomic_t fscache_n_retrievals_intr;
42642-extern atomic_t fscache_n_retrievals_nomem;
42643-extern atomic_t fscache_n_retrievals_object_dead;
42644-extern atomic_t fscache_n_retrieval_ops;
42645-extern atomic_t fscache_n_retrieval_op_waits;
42646+extern atomic_unchecked_t fscache_n_retrievals;
42647+extern atomic_unchecked_t fscache_n_retrievals_ok;
42648+extern atomic_unchecked_t fscache_n_retrievals_wait;
42649+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42650+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42651+extern atomic_unchecked_t fscache_n_retrievals_intr;
42652+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42653+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42654+extern atomic_unchecked_t fscache_n_retrieval_ops;
42655+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42656
42657-extern atomic_t fscache_n_stores;
42658-extern atomic_t fscache_n_stores_ok;
42659-extern atomic_t fscache_n_stores_again;
42660-extern atomic_t fscache_n_stores_nobufs;
42661-extern atomic_t fscache_n_stores_oom;
42662-extern atomic_t fscache_n_store_ops;
42663-extern atomic_t fscache_n_store_calls;
42664-extern atomic_t fscache_n_store_pages;
42665-extern atomic_t fscache_n_store_radix_deletes;
42666-extern atomic_t fscache_n_store_pages_over_limit;
42667+extern atomic_unchecked_t fscache_n_stores;
42668+extern atomic_unchecked_t fscache_n_stores_ok;
42669+extern atomic_unchecked_t fscache_n_stores_again;
42670+extern atomic_unchecked_t fscache_n_stores_nobufs;
42671+extern atomic_unchecked_t fscache_n_stores_oom;
42672+extern atomic_unchecked_t fscache_n_store_ops;
42673+extern atomic_unchecked_t fscache_n_store_calls;
42674+extern atomic_unchecked_t fscache_n_store_pages;
42675+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42676+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42677
42678-extern atomic_t fscache_n_store_vmscan_not_storing;
42679-extern atomic_t fscache_n_store_vmscan_gone;
42680-extern atomic_t fscache_n_store_vmscan_busy;
42681-extern atomic_t fscache_n_store_vmscan_cancelled;
42682+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42683+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42684+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42685+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42686
42687-extern atomic_t fscache_n_marks;
42688-extern atomic_t fscache_n_uncaches;
42689+extern atomic_unchecked_t fscache_n_marks;
42690+extern atomic_unchecked_t fscache_n_uncaches;
42691
42692-extern atomic_t fscache_n_acquires;
42693-extern atomic_t fscache_n_acquires_null;
42694-extern atomic_t fscache_n_acquires_no_cache;
42695-extern atomic_t fscache_n_acquires_ok;
42696-extern atomic_t fscache_n_acquires_nobufs;
42697-extern atomic_t fscache_n_acquires_oom;
42698+extern atomic_unchecked_t fscache_n_acquires;
42699+extern atomic_unchecked_t fscache_n_acquires_null;
42700+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42701+extern atomic_unchecked_t fscache_n_acquires_ok;
42702+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42703+extern atomic_unchecked_t fscache_n_acquires_oom;
42704
42705-extern atomic_t fscache_n_updates;
42706-extern atomic_t fscache_n_updates_null;
42707-extern atomic_t fscache_n_updates_run;
42708+extern atomic_unchecked_t fscache_n_updates;
42709+extern atomic_unchecked_t fscache_n_updates_null;
42710+extern atomic_unchecked_t fscache_n_updates_run;
42711
42712-extern atomic_t fscache_n_relinquishes;
42713-extern atomic_t fscache_n_relinquishes_null;
42714-extern atomic_t fscache_n_relinquishes_waitcrt;
42715-extern atomic_t fscache_n_relinquishes_retire;
42716+extern atomic_unchecked_t fscache_n_relinquishes;
42717+extern atomic_unchecked_t fscache_n_relinquishes_null;
42718+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42719+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42720
42721-extern atomic_t fscache_n_cookie_index;
42722-extern atomic_t fscache_n_cookie_data;
42723-extern atomic_t fscache_n_cookie_special;
42724+extern atomic_unchecked_t fscache_n_cookie_index;
42725+extern atomic_unchecked_t fscache_n_cookie_data;
42726+extern atomic_unchecked_t fscache_n_cookie_special;
42727
42728-extern atomic_t fscache_n_object_alloc;
42729-extern atomic_t fscache_n_object_no_alloc;
42730-extern atomic_t fscache_n_object_lookups;
42731-extern atomic_t fscache_n_object_lookups_negative;
42732-extern atomic_t fscache_n_object_lookups_positive;
42733-extern atomic_t fscache_n_object_lookups_timed_out;
42734-extern atomic_t fscache_n_object_created;
42735-extern atomic_t fscache_n_object_avail;
42736-extern atomic_t fscache_n_object_dead;
42737+extern atomic_unchecked_t fscache_n_object_alloc;
42738+extern atomic_unchecked_t fscache_n_object_no_alloc;
42739+extern atomic_unchecked_t fscache_n_object_lookups;
42740+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42741+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42742+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42743+extern atomic_unchecked_t fscache_n_object_created;
42744+extern atomic_unchecked_t fscache_n_object_avail;
42745+extern atomic_unchecked_t fscache_n_object_dead;
42746
42747-extern atomic_t fscache_n_checkaux_none;
42748-extern atomic_t fscache_n_checkaux_okay;
42749-extern atomic_t fscache_n_checkaux_update;
42750-extern atomic_t fscache_n_checkaux_obsolete;
42751+extern atomic_unchecked_t fscache_n_checkaux_none;
42752+extern atomic_unchecked_t fscache_n_checkaux_okay;
42753+extern atomic_unchecked_t fscache_n_checkaux_update;
42754+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42755
42756 extern atomic_t fscache_n_cop_alloc_object;
42757 extern atomic_t fscache_n_cop_lookup_object;
42758@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
42759 atomic_inc(stat);
42760 }
42761
42762+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42763+{
42764+ atomic_inc_unchecked(stat);
42765+}
42766+
42767 static inline void fscache_stat_d(atomic_t *stat)
42768 {
42769 atomic_dec(stat);
42770@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
42771
42772 #define __fscache_stat(stat) (NULL)
42773 #define fscache_stat(stat) do {} while (0)
42774+#define fscache_stat_unchecked(stat) do {} while (0)
42775 #define fscache_stat_d(stat) do {} while (0)
42776 #endif
42777
42778diff --git a/fs/fscache/object.c b/fs/fscache/object.c
42779index b6b897c..0ffff9c 100644
42780--- a/fs/fscache/object.c
42781+++ b/fs/fscache/object.c
42782@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42783 /* update the object metadata on disk */
42784 case FSCACHE_OBJECT_UPDATING:
42785 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42786- fscache_stat(&fscache_n_updates_run);
42787+ fscache_stat_unchecked(&fscache_n_updates_run);
42788 fscache_stat(&fscache_n_cop_update_object);
42789 object->cache->ops->update_object(object);
42790 fscache_stat_d(&fscache_n_cop_update_object);
42791@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42792 spin_lock(&object->lock);
42793 object->state = FSCACHE_OBJECT_DEAD;
42794 spin_unlock(&object->lock);
42795- fscache_stat(&fscache_n_object_dead);
42796+ fscache_stat_unchecked(&fscache_n_object_dead);
42797 goto terminal_transit;
42798
42799 /* handle the parent cache of this object being withdrawn from
42800@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42801 spin_lock(&object->lock);
42802 object->state = FSCACHE_OBJECT_DEAD;
42803 spin_unlock(&object->lock);
42804- fscache_stat(&fscache_n_object_dead);
42805+ fscache_stat_unchecked(&fscache_n_object_dead);
42806 goto terminal_transit;
42807
42808 /* complain about the object being woken up once it is
42809@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
42810 parent->cookie->def->name, cookie->def->name,
42811 object->cache->tag->name);
42812
42813- fscache_stat(&fscache_n_object_lookups);
42814+ fscache_stat_unchecked(&fscache_n_object_lookups);
42815 fscache_stat(&fscache_n_cop_lookup_object);
42816 ret = object->cache->ops->lookup_object(object);
42817 fscache_stat_d(&fscache_n_cop_lookup_object);
42818@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
42819 if (ret == -ETIMEDOUT) {
42820 /* probably stuck behind another object, so move this one to
42821 * the back of the queue */
42822- fscache_stat(&fscache_n_object_lookups_timed_out);
42823+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42824 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42825 }
42826
42827@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
42828
42829 spin_lock(&object->lock);
42830 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42831- fscache_stat(&fscache_n_object_lookups_negative);
42832+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42833
42834 /* transit here to allow write requests to begin stacking up
42835 * and read requests to begin returning ENODATA */
42836@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
42837 * result, in which case there may be data available */
42838 spin_lock(&object->lock);
42839 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42840- fscache_stat(&fscache_n_object_lookups_positive);
42841+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42842
42843 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42844
42845@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
42846 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42847 } else {
42848 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42849- fscache_stat(&fscache_n_object_created);
42850+ fscache_stat_unchecked(&fscache_n_object_created);
42851
42852 object->state = FSCACHE_OBJECT_AVAILABLE;
42853 spin_unlock(&object->lock);
42854@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
42855 fscache_enqueue_dependents(object);
42856
42857 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42858- fscache_stat(&fscache_n_object_avail);
42859+ fscache_stat_unchecked(&fscache_n_object_avail);
42860
42861 _leave("");
42862 }
42863@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
42864 enum fscache_checkaux result;
42865
42866 if (!object->cookie->def->check_aux) {
42867- fscache_stat(&fscache_n_checkaux_none);
42868+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42869 return FSCACHE_CHECKAUX_OKAY;
42870 }
42871
42872@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
42873 switch (result) {
42874 /* entry okay as is */
42875 case FSCACHE_CHECKAUX_OKAY:
42876- fscache_stat(&fscache_n_checkaux_okay);
42877+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42878 break;
42879
42880 /* entry requires update */
42881 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42882- fscache_stat(&fscache_n_checkaux_update);
42883+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42884 break;
42885
42886 /* entry requires deletion */
42887 case FSCACHE_CHECKAUX_OBSOLETE:
42888- fscache_stat(&fscache_n_checkaux_obsolete);
42889+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42890 break;
42891
42892 default:
42893diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
42894index 30afdfa..2256596 100644
42895--- a/fs/fscache/operation.c
42896+++ b/fs/fscache/operation.c
42897@@ -17,7 +17,7 @@
42898 #include <linux/slab.h>
42899 #include "internal.h"
42900
42901-atomic_t fscache_op_debug_id;
42902+atomic_unchecked_t fscache_op_debug_id;
42903 EXPORT_SYMBOL(fscache_op_debug_id);
42904
42905 /**
42906@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
42907 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42908 ASSERTCMP(atomic_read(&op->usage), >, 0);
42909
42910- fscache_stat(&fscache_n_op_enqueue);
42911+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42912 switch (op->flags & FSCACHE_OP_TYPE) {
42913 case FSCACHE_OP_ASYNC:
42914 _debug("queue async");
42915@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
42916 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42917 if (op->processor)
42918 fscache_enqueue_operation(op);
42919- fscache_stat(&fscache_n_op_run);
42920+ fscache_stat_unchecked(&fscache_n_op_run);
42921 }
42922
42923 /*
42924@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
42925 if (object->n_ops > 1) {
42926 atomic_inc(&op->usage);
42927 list_add_tail(&op->pend_link, &object->pending_ops);
42928- fscache_stat(&fscache_n_op_pend);
42929+ fscache_stat_unchecked(&fscache_n_op_pend);
42930 } else if (!list_empty(&object->pending_ops)) {
42931 atomic_inc(&op->usage);
42932 list_add_tail(&op->pend_link, &object->pending_ops);
42933- fscache_stat(&fscache_n_op_pend);
42934+ fscache_stat_unchecked(&fscache_n_op_pend);
42935 fscache_start_operations(object);
42936 } else {
42937 ASSERTCMP(object->n_in_progress, ==, 0);
42938@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
42939 object->n_exclusive++; /* reads and writes must wait */
42940 atomic_inc(&op->usage);
42941 list_add_tail(&op->pend_link, &object->pending_ops);
42942- fscache_stat(&fscache_n_op_pend);
42943+ fscache_stat_unchecked(&fscache_n_op_pend);
42944 ret = 0;
42945 } else {
42946 /* not allowed to submit ops in any other state */
42947@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
42948 if (object->n_exclusive > 0) {
42949 atomic_inc(&op->usage);
42950 list_add_tail(&op->pend_link, &object->pending_ops);
42951- fscache_stat(&fscache_n_op_pend);
42952+ fscache_stat_unchecked(&fscache_n_op_pend);
42953 } else if (!list_empty(&object->pending_ops)) {
42954 atomic_inc(&op->usage);
42955 list_add_tail(&op->pend_link, &object->pending_ops);
42956- fscache_stat(&fscache_n_op_pend);
42957+ fscache_stat_unchecked(&fscache_n_op_pend);
42958 fscache_start_operations(object);
42959 } else {
42960 ASSERTCMP(object->n_exclusive, ==, 0);
42961@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
42962 object->n_ops++;
42963 atomic_inc(&op->usage);
42964 list_add_tail(&op->pend_link, &object->pending_ops);
42965- fscache_stat(&fscache_n_op_pend);
42966+ fscache_stat_unchecked(&fscache_n_op_pend);
42967 ret = 0;
42968 } else if (object->state == FSCACHE_OBJECT_DYING ||
42969 object->state == FSCACHE_OBJECT_LC_DYING ||
42970 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42971- fscache_stat(&fscache_n_op_rejected);
42972+ fscache_stat_unchecked(&fscache_n_op_rejected);
42973 ret = -ENOBUFS;
42974 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42975 fscache_report_unexpected_submission(object, op, ostate);
42976@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
42977
42978 ret = -EBUSY;
42979 if (!list_empty(&op->pend_link)) {
42980- fscache_stat(&fscache_n_op_cancelled);
42981+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42982 list_del_init(&op->pend_link);
42983 object->n_ops--;
42984 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42985@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
42986 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42987 BUG();
42988
42989- fscache_stat(&fscache_n_op_release);
42990+ fscache_stat_unchecked(&fscache_n_op_release);
42991
42992 if (op->release) {
42993 op->release(op);
42994@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
42995 * lock, and defer it otherwise */
42996 if (!spin_trylock(&object->lock)) {
42997 _debug("defer put");
42998- fscache_stat(&fscache_n_op_deferred_release);
42999+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
43000
43001 cache = object->cache;
43002 spin_lock(&cache->op_gc_list_lock);
43003@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43004
43005 _debug("GC DEFERRED REL OBJ%x OP%x",
43006 object->debug_id, op->debug_id);
43007- fscache_stat(&fscache_n_op_gc);
43008+ fscache_stat_unchecked(&fscache_n_op_gc);
43009
43010 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43011
43012diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43013index 3f7a59b..cf196cc 100644
43014--- a/fs/fscache/page.c
43015+++ b/fs/fscache/page.c
43016@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43017 val = radix_tree_lookup(&cookie->stores, page->index);
43018 if (!val) {
43019 rcu_read_unlock();
43020- fscache_stat(&fscache_n_store_vmscan_not_storing);
43021+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43022 __fscache_uncache_page(cookie, page);
43023 return true;
43024 }
43025@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43026 spin_unlock(&cookie->stores_lock);
43027
43028 if (xpage) {
43029- fscache_stat(&fscache_n_store_vmscan_cancelled);
43030- fscache_stat(&fscache_n_store_radix_deletes);
43031+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43032+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43033 ASSERTCMP(xpage, ==, page);
43034 } else {
43035- fscache_stat(&fscache_n_store_vmscan_gone);
43036+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43037 }
43038
43039 wake_up_bit(&cookie->flags, 0);
43040@@ -107,7 +107,7 @@ page_busy:
43041 /* we might want to wait here, but that could deadlock the allocator as
43042 * the work threads writing to the cache may all end up sleeping
43043 * on memory allocation */
43044- fscache_stat(&fscache_n_store_vmscan_busy);
43045+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43046 return false;
43047 }
43048 EXPORT_SYMBOL(__fscache_maybe_release_page);
43049@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43050 FSCACHE_COOKIE_STORING_TAG);
43051 if (!radix_tree_tag_get(&cookie->stores, page->index,
43052 FSCACHE_COOKIE_PENDING_TAG)) {
43053- fscache_stat(&fscache_n_store_radix_deletes);
43054+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43055 xpage = radix_tree_delete(&cookie->stores, page->index);
43056 }
43057 spin_unlock(&cookie->stores_lock);
43058@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43059
43060 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43061
43062- fscache_stat(&fscache_n_attr_changed_calls);
43063+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43064
43065 if (fscache_object_is_active(object)) {
43066 fscache_stat(&fscache_n_cop_attr_changed);
43067@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43068
43069 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43070
43071- fscache_stat(&fscache_n_attr_changed);
43072+ fscache_stat_unchecked(&fscache_n_attr_changed);
43073
43074 op = kzalloc(sizeof(*op), GFP_KERNEL);
43075 if (!op) {
43076- fscache_stat(&fscache_n_attr_changed_nomem);
43077+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43078 _leave(" = -ENOMEM");
43079 return -ENOMEM;
43080 }
43081@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43082 if (fscache_submit_exclusive_op(object, op) < 0)
43083 goto nobufs;
43084 spin_unlock(&cookie->lock);
43085- fscache_stat(&fscache_n_attr_changed_ok);
43086+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43087 fscache_put_operation(op);
43088 _leave(" = 0");
43089 return 0;
43090@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43091 nobufs:
43092 spin_unlock(&cookie->lock);
43093 kfree(op);
43094- fscache_stat(&fscache_n_attr_changed_nobufs);
43095+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43096 _leave(" = %d", -ENOBUFS);
43097 return -ENOBUFS;
43098 }
43099@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43100 /* allocate a retrieval operation and attempt to submit it */
43101 op = kzalloc(sizeof(*op), GFP_NOIO);
43102 if (!op) {
43103- fscache_stat(&fscache_n_retrievals_nomem);
43104+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43105 return NULL;
43106 }
43107
43108@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43109 return 0;
43110 }
43111
43112- fscache_stat(&fscache_n_retrievals_wait);
43113+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43114
43115 jif = jiffies;
43116 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43117 fscache_wait_bit_interruptible,
43118 TASK_INTERRUPTIBLE) != 0) {
43119- fscache_stat(&fscache_n_retrievals_intr);
43120+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43121 _leave(" = -ERESTARTSYS");
43122 return -ERESTARTSYS;
43123 }
43124@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43125 */
43126 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43127 struct fscache_retrieval *op,
43128- atomic_t *stat_op_waits,
43129- atomic_t *stat_object_dead)
43130+ atomic_unchecked_t *stat_op_waits,
43131+ atomic_unchecked_t *stat_object_dead)
43132 {
43133 int ret;
43134
43135@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43136 goto check_if_dead;
43137
43138 _debug(">>> WT");
43139- fscache_stat(stat_op_waits);
43140+ fscache_stat_unchecked(stat_op_waits);
43141 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43142 fscache_wait_bit_interruptible,
43143 TASK_INTERRUPTIBLE) < 0) {
43144@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43145
43146 check_if_dead:
43147 if (unlikely(fscache_object_is_dead(object))) {
43148- fscache_stat(stat_object_dead);
43149+ fscache_stat_unchecked(stat_object_dead);
43150 return -ENOBUFS;
43151 }
43152 return 0;
43153@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43154
43155 _enter("%p,%p,,,", cookie, page);
43156
43157- fscache_stat(&fscache_n_retrievals);
43158+ fscache_stat_unchecked(&fscache_n_retrievals);
43159
43160 if (hlist_empty(&cookie->backing_objects))
43161 goto nobufs;
43162@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43163 goto nobufs_unlock;
43164 spin_unlock(&cookie->lock);
43165
43166- fscache_stat(&fscache_n_retrieval_ops);
43167+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43168
43169 /* pin the netfs read context in case we need to do the actual netfs
43170 * read because we've encountered a cache read failure */
43171@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43172
43173 error:
43174 if (ret == -ENOMEM)
43175- fscache_stat(&fscache_n_retrievals_nomem);
43176+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43177 else if (ret == -ERESTARTSYS)
43178- fscache_stat(&fscache_n_retrievals_intr);
43179+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43180 else if (ret == -ENODATA)
43181- fscache_stat(&fscache_n_retrievals_nodata);
43182+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43183 else if (ret < 0)
43184- fscache_stat(&fscache_n_retrievals_nobufs);
43185+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43186 else
43187- fscache_stat(&fscache_n_retrievals_ok);
43188+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43189
43190 fscache_put_retrieval(op);
43191 _leave(" = %d", ret);
43192@@ -429,7 +429,7 @@ nobufs_unlock:
43193 spin_unlock(&cookie->lock);
43194 kfree(op);
43195 nobufs:
43196- fscache_stat(&fscache_n_retrievals_nobufs);
43197+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43198 _leave(" = -ENOBUFS");
43199 return -ENOBUFS;
43200 }
43201@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43202
43203 _enter("%p,,%d,,,", cookie, *nr_pages);
43204
43205- fscache_stat(&fscache_n_retrievals);
43206+ fscache_stat_unchecked(&fscache_n_retrievals);
43207
43208 if (hlist_empty(&cookie->backing_objects))
43209 goto nobufs;
43210@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43211 goto nobufs_unlock;
43212 spin_unlock(&cookie->lock);
43213
43214- fscache_stat(&fscache_n_retrieval_ops);
43215+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43216
43217 /* pin the netfs read context in case we need to do the actual netfs
43218 * read because we've encountered a cache read failure */
43219@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43220
43221 error:
43222 if (ret == -ENOMEM)
43223- fscache_stat(&fscache_n_retrievals_nomem);
43224+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43225 else if (ret == -ERESTARTSYS)
43226- fscache_stat(&fscache_n_retrievals_intr);
43227+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43228 else if (ret == -ENODATA)
43229- fscache_stat(&fscache_n_retrievals_nodata);
43230+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43231 else if (ret < 0)
43232- fscache_stat(&fscache_n_retrievals_nobufs);
43233+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43234 else
43235- fscache_stat(&fscache_n_retrievals_ok);
43236+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43237
43238 fscache_put_retrieval(op);
43239 _leave(" = %d", ret);
43240@@ -545,7 +545,7 @@ nobufs_unlock:
43241 spin_unlock(&cookie->lock);
43242 kfree(op);
43243 nobufs:
43244- fscache_stat(&fscache_n_retrievals_nobufs);
43245+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43246 _leave(" = -ENOBUFS");
43247 return -ENOBUFS;
43248 }
43249@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43250
43251 _enter("%p,%p,,,", cookie, page);
43252
43253- fscache_stat(&fscache_n_allocs);
43254+ fscache_stat_unchecked(&fscache_n_allocs);
43255
43256 if (hlist_empty(&cookie->backing_objects))
43257 goto nobufs;
43258@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43259 goto nobufs_unlock;
43260 spin_unlock(&cookie->lock);
43261
43262- fscache_stat(&fscache_n_alloc_ops);
43263+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43264
43265 ret = fscache_wait_for_retrieval_activation(
43266 object, op,
43267@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43268
43269 error:
43270 if (ret == -ERESTARTSYS)
43271- fscache_stat(&fscache_n_allocs_intr);
43272+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43273 else if (ret < 0)
43274- fscache_stat(&fscache_n_allocs_nobufs);
43275+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43276 else
43277- fscache_stat(&fscache_n_allocs_ok);
43278+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43279
43280 fscache_put_retrieval(op);
43281 _leave(" = %d", ret);
43282@@ -625,7 +625,7 @@ nobufs_unlock:
43283 spin_unlock(&cookie->lock);
43284 kfree(op);
43285 nobufs:
43286- fscache_stat(&fscache_n_allocs_nobufs);
43287+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43288 _leave(" = -ENOBUFS");
43289 return -ENOBUFS;
43290 }
43291@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43292
43293 spin_lock(&cookie->stores_lock);
43294
43295- fscache_stat(&fscache_n_store_calls);
43296+ fscache_stat_unchecked(&fscache_n_store_calls);
43297
43298 /* find a page to store */
43299 page = NULL;
43300@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43301 page = results[0];
43302 _debug("gang %d [%lx]", n, page->index);
43303 if (page->index > op->store_limit) {
43304- fscache_stat(&fscache_n_store_pages_over_limit);
43305+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43306 goto superseded;
43307 }
43308
43309@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43310 spin_unlock(&cookie->stores_lock);
43311 spin_unlock(&object->lock);
43312
43313- fscache_stat(&fscache_n_store_pages);
43314+ fscache_stat_unchecked(&fscache_n_store_pages);
43315 fscache_stat(&fscache_n_cop_write_page);
43316 ret = object->cache->ops->write_page(op, page);
43317 fscache_stat_d(&fscache_n_cop_write_page);
43318@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43319 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43320 ASSERT(PageFsCache(page));
43321
43322- fscache_stat(&fscache_n_stores);
43323+ fscache_stat_unchecked(&fscache_n_stores);
43324
43325 op = kzalloc(sizeof(*op), GFP_NOIO);
43326 if (!op)
43327@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43328 spin_unlock(&cookie->stores_lock);
43329 spin_unlock(&object->lock);
43330
43331- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43332+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43333 op->store_limit = object->store_limit;
43334
43335 if (fscache_submit_op(object, &op->op) < 0)
43336@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43337
43338 spin_unlock(&cookie->lock);
43339 radix_tree_preload_end();
43340- fscache_stat(&fscache_n_store_ops);
43341- fscache_stat(&fscache_n_stores_ok);
43342+ fscache_stat_unchecked(&fscache_n_store_ops);
43343+ fscache_stat_unchecked(&fscache_n_stores_ok);
43344
43345 /* the work queue now carries its own ref on the object */
43346 fscache_put_operation(&op->op);
43347@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43348 return 0;
43349
43350 already_queued:
43351- fscache_stat(&fscache_n_stores_again);
43352+ fscache_stat_unchecked(&fscache_n_stores_again);
43353 already_pending:
43354 spin_unlock(&cookie->stores_lock);
43355 spin_unlock(&object->lock);
43356 spin_unlock(&cookie->lock);
43357 radix_tree_preload_end();
43358 kfree(op);
43359- fscache_stat(&fscache_n_stores_ok);
43360+ fscache_stat_unchecked(&fscache_n_stores_ok);
43361 _leave(" = 0");
43362 return 0;
43363
43364@@ -851,14 +851,14 @@ nobufs:
43365 spin_unlock(&cookie->lock);
43366 radix_tree_preload_end();
43367 kfree(op);
43368- fscache_stat(&fscache_n_stores_nobufs);
43369+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43370 _leave(" = -ENOBUFS");
43371 return -ENOBUFS;
43372
43373 nomem_free:
43374 kfree(op);
43375 nomem:
43376- fscache_stat(&fscache_n_stores_oom);
43377+ fscache_stat_unchecked(&fscache_n_stores_oom);
43378 _leave(" = -ENOMEM");
43379 return -ENOMEM;
43380 }
43381@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43382 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43383 ASSERTCMP(page, !=, NULL);
43384
43385- fscache_stat(&fscache_n_uncaches);
43386+ fscache_stat_unchecked(&fscache_n_uncaches);
43387
43388 /* cache withdrawal may beat us to it */
43389 if (!PageFsCache(page))
43390@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43391 unsigned long loop;
43392
43393 #ifdef CONFIG_FSCACHE_STATS
43394- atomic_add(pagevec->nr, &fscache_n_marks);
43395+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43396 #endif
43397
43398 for (loop = 0; loop < pagevec->nr; loop++) {
43399diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43400index 4765190..2a067f2 100644
43401--- a/fs/fscache/stats.c
43402+++ b/fs/fscache/stats.c
43403@@ -18,95 +18,95 @@
43404 /*
43405 * operation counters
43406 */
43407-atomic_t fscache_n_op_pend;
43408-atomic_t fscache_n_op_run;
43409-atomic_t fscache_n_op_enqueue;
43410-atomic_t fscache_n_op_requeue;
43411-atomic_t fscache_n_op_deferred_release;
43412-atomic_t fscache_n_op_release;
43413-atomic_t fscache_n_op_gc;
43414-atomic_t fscache_n_op_cancelled;
43415-atomic_t fscache_n_op_rejected;
43416+atomic_unchecked_t fscache_n_op_pend;
43417+atomic_unchecked_t fscache_n_op_run;
43418+atomic_unchecked_t fscache_n_op_enqueue;
43419+atomic_unchecked_t fscache_n_op_requeue;
43420+atomic_unchecked_t fscache_n_op_deferred_release;
43421+atomic_unchecked_t fscache_n_op_release;
43422+atomic_unchecked_t fscache_n_op_gc;
43423+atomic_unchecked_t fscache_n_op_cancelled;
43424+atomic_unchecked_t fscache_n_op_rejected;
43425
43426-atomic_t fscache_n_attr_changed;
43427-atomic_t fscache_n_attr_changed_ok;
43428-atomic_t fscache_n_attr_changed_nobufs;
43429-atomic_t fscache_n_attr_changed_nomem;
43430-atomic_t fscache_n_attr_changed_calls;
43431+atomic_unchecked_t fscache_n_attr_changed;
43432+atomic_unchecked_t fscache_n_attr_changed_ok;
43433+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43434+atomic_unchecked_t fscache_n_attr_changed_nomem;
43435+atomic_unchecked_t fscache_n_attr_changed_calls;
43436
43437-atomic_t fscache_n_allocs;
43438-atomic_t fscache_n_allocs_ok;
43439-atomic_t fscache_n_allocs_wait;
43440-atomic_t fscache_n_allocs_nobufs;
43441-atomic_t fscache_n_allocs_intr;
43442-atomic_t fscache_n_allocs_object_dead;
43443-atomic_t fscache_n_alloc_ops;
43444-atomic_t fscache_n_alloc_op_waits;
43445+atomic_unchecked_t fscache_n_allocs;
43446+atomic_unchecked_t fscache_n_allocs_ok;
43447+atomic_unchecked_t fscache_n_allocs_wait;
43448+atomic_unchecked_t fscache_n_allocs_nobufs;
43449+atomic_unchecked_t fscache_n_allocs_intr;
43450+atomic_unchecked_t fscache_n_allocs_object_dead;
43451+atomic_unchecked_t fscache_n_alloc_ops;
43452+atomic_unchecked_t fscache_n_alloc_op_waits;
43453
43454-atomic_t fscache_n_retrievals;
43455-atomic_t fscache_n_retrievals_ok;
43456-atomic_t fscache_n_retrievals_wait;
43457-atomic_t fscache_n_retrievals_nodata;
43458-atomic_t fscache_n_retrievals_nobufs;
43459-atomic_t fscache_n_retrievals_intr;
43460-atomic_t fscache_n_retrievals_nomem;
43461-atomic_t fscache_n_retrievals_object_dead;
43462-atomic_t fscache_n_retrieval_ops;
43463-atomic_t fscache_n_retrieval_op_waits;
43464+atomic_unchecked_t fscache_n_retrievals;
43465+atomic_unchecked_t fscache_n_retrievals_ok;
43466+atomic_unchecked_t fscache_n_retrievals_wait;
43467+atomic_unchecked_t fscache_n_retrievals_nodata;
43468+atomic_unchecked_t fscache_n_retrievals_nobufs;
43469+atomic_unchecked_t fscache_n_retrievals_intr;
43470+atomic_unchecked_t fscache_n_retrievals_nomem;
43471+atomic_unchecked_t fscache_n_retrievals_object_dead;
43472+atomic_unchecked_t fscache_n_retrieval_ops;
43473+atomic_unchecked_t fscache_n_retrieval_op_waits;
43474
43475-atomic_t fscache_n_stores;
43476-atomic_t fscache_n_stores_ok;
43477-atomic_t fscache_n_stores_again;
43478-atomic_t fscache_n_stores_nobufs;
43479-atomic_t fscache_n_stores_oom;
43480-atomic_t fscache_n_store_ops;
43481-atomic_t fscache_n_store_calls;
43482-atomic_t fscache_n_store_pages;
43483-atomic_t fscache_n_store_radix_deletes;
43484-atomic_t fscache_n_store_pages_over_limit;
43485+atomic_unchecked_t fscache_n_stores;
43486+atomic_unchecked_t fscache_n_stores_ok;
43487+atomic_unchecked_t fscache_n_stores_again;
43488+atomic_unchecked_t fscache_n_stores_nobufs;
43489+atomic_unchecked_t fscache_n_stores_oom;
43490+atomic_unchecked_t fscache_n_store_ops;
43491+atomic_unchecked_t fscache_n_store_calls;
43492+atomic_unchecked_t fscache_n_store_pages;
43493+atomic_unchecked_t fscache_n_store_radix_deletes;
43494+atomic_unchecked_t fscache_n_store_pages_over_limit;
43495
43496-atomic_t fscache_n_store_vmscan_not_storing;
43497-atomic_t fscache_n_store_vmscan_gone;
43498-atomic_t fscache_n_store_vmscan_busy;
43499-atomic_t fscache_n_store_vmscan_cancelled;
43500+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43501+atomic_unchecked_t fscache_n_store_vmscan_gone;
43502+atomic_unchecked_t fscache_n_store_vmscan_busy;
43503+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43504
43505-atomic_t fscache_n_marks;
43506-atomic_t fscache_n_uncaches;
43507+atomic_unchecked_t fscache_n_marks;
43508+atomic_unchecked_t fscache_n_uncaches;
43509
43510-atomic_t fscache_n_acquires;
43511-atomic_t fscache_n_acquires_null;
43512-atomic_t fscache_n_acquires_no_cache;
43513-atomic_t fscache_n_acquires_ok;
43514-atomic_t fscache_n_acquires_nobufs;
43515-atomic_t fscache_n_acquires_oom;
43516+atomic_unchecked_t fscache_n_acquires;
43517+atomic_unchecked_t fscache_n_acquires_null;
43518+atomic_unchecked_t fscache_n_acquires_no_cache;
43519+atomic_unchecked_t fscache_n_acquires_ok;
43520+atomic_unchecked_t fscache_n_acquires_nobufs;
43521+atomic_unchecked_t fscache_n_acquires_oom;
43522
43523-atomic_t fscache_n_updates;
43524-atomic_t fscache_n_updates_null;
43525-atomic_t fscache_n_updates_run;
43526+atomic_unchecked_t fscache_n_updates;
43527+atomic_unchecked_t fscache_n_updates_null;
43528+atomic_unchecked_t fscache_n_updates_run;
43529
43530-atomic_t fscache_n_relinquishes;
43531-atomic_t fscache_n_relinquishes_null;
43532-atomic_t fscache_n_relinquishes_waitcrt;
43533-atomic_t fscache_n_relinquishes_retire;
43534+atomic_unchecked_t fscache_n_relinquishes;
43535+atomic_unchecked_t fscache_n_relinquishes_null;
43536+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43537+atomic_unchecked_t fscache_n_relinquishes_retire;
43538
43539-atomic_t fscache_n_cookie_index;
43540-atomic_t fscache_n_cookie_data;
43541-atomic_t fscache_n_cookie_special;
43542+atomic_unchecked_t fscache_n_cookie_index;
43543+atomic_unchecked_t fscache_n_cookie_data;
43544+atomic_unchecked_t fscache_n_cookie_special;
43545
43546-atomic_t fscache_n_object_alloc;
43547-atomic_t fscache_n_object_no_alloc;
43548-atomic_t fscache_n_object_lookups;
43549-atomic_t fscache_n_object_lookups_negative;
43550-atomic_t fscache_n_object_lookups_positive;
43551-atomic_t fscache_n_object_lookups_timed_out;
43552-atomic_t fscache_n_object_created;
43553-atomic_t fscache_n_object_avail;
43554-atomic_t fscache_n_object_dead;
43555+atomic_unchecked_t fscache_n_object_alloc;
43556+atomic_unchecked_t fscache_n_object_no_alloc;
43557+atomic_unchecked_t fscache_n_object_lookups;
43558+atomic_unchecked_t fscache_n_object_lookups_negative;
43559+atomic_unchecked_t fscache_n_object_lookups_positive;
43560+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43561+atomic_unchecked_t fscache_n_object_created;
43562+atomic_unchecked_t fscache_n_object_avail;
43563+atomic_unchecked_t fscache_n_object_dead;
43564
43565-atomic_t fscache_n_checkaux_none;
43566-atomic_t fscache_n_checkaux_okay;
43567-atomic_t fscache_n_checkaux_update;
43568-atomic_t fscache_n_checkaux_obsolete;
43569+atomic_unchecked_t fscache_n_checkaux_none;
43570+atomic_unchecked_t fscache_n_checkaux_okay;
43571+atomic_unchecked_t fscache_n_checkaux_update;
43572+atomic_unchecked_t fscache_n_checkaux_obsolete;
43573
43574 atomic_t fscache_n_cop_alloc_object;
43575 atomic_t fscache_n_cop_lookup_object;
43576@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43577 seq_puts(m, "FS-Cache statistics\n");
43578
43579 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43580- atomic_read(&fscache_n_cookie_index),
43581- atomic_read(&fscache_n_cookie_data),
43582- atomic_read(&fscache_n_cookie_special));
43583+ atomic_read_unchecked(&fscache_n_cookie_index),
43584+ atomic_read_unchecked(&fscache_n_cookie_data),
43585+ atomic_read_unchecked(&fscache_n_cookie_special));
43586
43587 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43588- atomic_read(&fscache_n_object_alloc),
43589- atomic_read(&fscache_n_object_no_alloc),
43590- atomic_read(&fscache_n_object_avail),
43591- atomic_read(&fscache_n_object_dead));
43592+ atomic_read_unchecked(&fscache_n_object_alloc),
43593+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43594+ atomic_read_unchecked(&fscache_n_object_avail),
43595+ atomic_read_unchecked(&fscache_n_object_dead));
43596 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43597- atomic_read(&fscache_n_checkaux_none),
43598- atomic_read(&fscache_n_checkaux_okay),
43599- atomic_read(&fscache_n_checkaux_update),
43600- atomic_read(&fscache_n_checkaux_obsolete));
43601+ atomic_read_unchecked(&fscache_n_checkaux_none),
43602+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43603+ atomic_read_unchecked(&fscache_n_checkaux_update),
43604+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43605
43606 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43607- atomic_read(&fscache_n_marks),
43608- atomic_read(&fscache_n_uncaches));
43609+ atomic_read_unchecked(&fscache_n_marks),
43610+ atomic_read_unchecked(&fscache_n_uncaches));
43611
43612 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43613 " oom=%u\n",
43614- atomic_read(&fscache_n_acquires),
43615- atomic_read(&fscache_n_acquires_null),
43616- atomic_read(&fscache_n_acquires_no_cache),
43617- atomic_read(&fscache_n_acquires_ok),
43618- atomic_read(&fscache_n_acquires_nobufs),
43619- atomic_read(&fscache_n_acquires_oom));
43620+ atomic_read_unchecked(&fscache_n_acquires),
43621+ atomic_read_unchecked(&fscache_n_acquires_null),
43622+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43623+ atomic_read_unchecked(&fscache_n_acquires_ok),
43624+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43625+ atomic_read_unchecked(&fscache_n_acquires_oom));
43626
43627 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43628- atomic_read(&fscache_n_object_lookups),
43629- atomic_read(&fscache_n_object_lookups_negative),
43630- atomic_read(&fscache_n_object_lookups_positive),
43631- atomic_read(&fscache_n_object_created),
43632- atomic_read(&fscache_n_object_lookups_timed_out));
43633+ atomic_read_unchecked(&fscache_n_object_lookups),
43634+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43635+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43636+ atomic_read_unchecked(&fscache_n_object_created),
43637+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43638
43639 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43640- atomic_read(&fscache_n_updates),
43641- atomic_read(&fscache_n_updates_null),
43642- atomic_read(&fscache_n_updates_run));
43643+ atomic_read_unchecked(&fscache_n_updates),
43644+ atomic_read_unchecked(&fscache_n_updates_null),
43645+ atomic_read_unchecked(&fscache_n_updates_run));
43646
43647 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43648- atomic_read(&fscache_n_relinquishes),
43649- atomic_read(&fscache_n_relinquishes_null),
43650- atomic_read(&fscache_n_relinquishes_waitcrt),
43651- atomic_read(&fscache_n_relinquishes_retire));
43652+ atomic_read_unchecked(&fscache_n_relinquishes),
43653+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43654+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43655+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43656
43657 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43658- atomic_read(&fscache_n_attr_changed),
43659- atomic_read(&fscache_n_attr_changed_ok),
43660- atomic_read(&fscache_n_attr_changed_nobufs),
43661- atomic_read(&fscache_n_attr_changed_nomem),
43662- atomic_read(&fscache_n_attr_changed_calls));
43663+ atomic_read_unchecked(&fscache_n_attr_changed),
43664+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43665+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43666+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43667+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43668
43669 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43670- atomic_read(&fscache_n_allocs),
43671- atomic_read(&fscache_n_allocs_ok),
43672- atomic_read(&fscache_n_allocs_wait),
43673- atomic_read(&fscache_n_allocs_nobufs),
43674- atomic_read(&fscache_n_allocs_intr));
43675+ atomic_read_unchecked(&fscache_n_allocs),
43676+ atomic_read_unchecked(&fscache_n_allocs_ok),
43677+ atomic_read_unchecked(&fscache_n_allocs_wait),
43678+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43679+ atomic_read_unchecked(&fscache_n_allocs_intr));
43680 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43681- atomic_read(&fscache_n_alloc_ops),
43682- atomic_read(&fscache_n_alloc_op_waits),
43683- atomic_read(&fscache_n_allocs_object_dead));
43684+ atomic_read_unchecked(&fscache_n_alloc_ops),
43685+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43686+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43687
43688 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43689 " int=%u oom=%u\n",
43690- atomic_read(&fscache_n_retrievals),
43691- atomic_read(&fscache_n_retrievals_ok),
43692- atomic_read(&fscache_n_retrievals_wait),
43693- atomic_read(&fscache_n_retrievals_nodata),
43694- atomic_read(&fscache_n_retrievals_nobufs),
43695- atomic_read(&fscache_n_retrievals_intr),
43696- atomic_read(&fscache_n_retrievals_nomem));
43697+ atomic_read_unchecked(&fscache_n_retrievals),
43698+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43699+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43700+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43701+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43702+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43703+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43704 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43705- atomic_read(&fscache_n_retrieval_ops),
43706- atomic_read(&fscache_n_retrieval_op_waits),
43707- atomic_read(&fscache_n_retrievals_object_dead));
43708+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43709+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43710+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43711
43712 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43713- atomic_read(&fscache_n_stores),
43714- atomic_read(&fscache_n_stores_ok),
43715- atomic_read(&fscache_n_stores_again),
43716- atomic_read(&fscache_n_stores_nobufs),
43717- atomic_read(&fscache_n_stores_oom));
43718+ atomic_read_unchecked(&fscache_n_stores),
43719+ atomic_read_unchecked(&fscache_n_stores_ok),
43720+ atomic_read_unchecked(&fscache_n_stores_again),
43721+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43722+ atomic_read_unchecked(&fscache_n_stores_oom));
43723 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43724- atomic_read(&fscache_n_store_ops),
43725- atomic_read(&fscache_n_store_calls),
43726- atomic_read(&fscache_n_store_pages),
43727- atomic_read(&fscache_n_store_radix_deletes),
43728- atomic_read(&fscache_n_store_pages_over_limit));
43729+ atomic_read_unchecked(&fscache_n_store_ops),
43730+ atomic_read_unchecked(&fscache_n_store_calls),
43731+ atomic_read_unchecked(&fscache_n_store_pages),
43732+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43733+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43734
43735 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43736- atomic_read(&fscache_n_store_vmscan_not_storing),
43737- atomic_read(&fscache_n_store_vmscan_gone),
43738- atomic_read(&fscache_n_store_vmscan_busy),
43739- atomic_read(&fscache_n_store_vmscan_cancelled));
43740+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43741+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43742+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43743+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43744
43745 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43746- atomic_read(&fscache_n_op_pend),
43747- atomic_read(&fscache_n_op_run),
43748- atomic_read(&fscache_n_op_enqueue),
43749- atomic_read(&fscache_n_op_cancelled),
43750- atomic_read(&fscache_n_op_rejected));
43751+ atomic_read_unchecked(&fscache_n_op_pend),
43752+ atomic_read_unchecked(&fscache_n_op_run),
43753+ atomic_read_unchecked(&fscache_n_op_enqueue),
43754+ atomic_read_unchecked(&fscache_n_op_cancelled),
43755+ atomic_read_unchecked(&fscache_n_op_rejected));
43756 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43757- atomic_read(&fscache_n_op_deferred_release),
43758- atomic_read(&fscache_n_op_release),
43759- atomic_read(&fscache_n_op_gc));
43760+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43761+ atomic_read_unchecked(&fscache_n_op_release),
43762+ atomic_read_unchecked(&fscache_n_op_gc));
43763
43764 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43765 atomic_read(&fscache_n_cop_alloc_object),
43766diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
43767index 3426521..3b75162 100644
43768--- a/fs/fuse/cuse.c
43769+++ b/fs/fuse/cuse.c
43770@@ -587,10 +587,12 @@ static int __init cuse_init(void)
43771 INIT_LIST_HEAD(&cuse_conntbl[i]);
43772
43773 /* inherit and extend fuse_dev_operations */
43774- cuse_channel_fops = fuse_dev_operations;
43775- cuse_channel_fops.owner = THIS_MODULE;
43776- cuse_channel_fops.open = cuse_channel_open;
43777- cuse_channel_fops.release = cuse_channel_release;
43778+ pax_open_kernel();
43779+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43780+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43781+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43782+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43783+ pax_close_kernel();
43784
43785 cuse_class = class_create(THIS_MODULE, "cuse");
43786 if (IS_ERR(cuse_class))
43787diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
43788index 2aaf3ea..8e50863 100644
43789--- a/fs/fuse/dev.c
43790+++ b/fs/fuse/dev.c
43791@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
43792 ret = 0;
43793 pipe_lock(pipe);
43794
43795- if (!pipe->readers) {
43796+ if (!atomic_read(&pipe->readers)) {
43797 send_sig(SIGPIPE, current, 0);
43798 if (!ret)
43799 ret = -EPIPE;
43800diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
43801index 9f63e49..d8a64c0 100644
43802--- a/fs/fuse/dir.c
43803+++ b/fs/fuse/dir.c
43804@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
43805 return link;
43806 }
43807
43808-static void free_link(char *link)
43809+static void free_link(const char *link)
43810 {
43811 if (!IS_ERR(link))
43812 free_page((unsigned long) link);
43813diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
43814index cfd4959..a780959 100644
43815--- a/fs/gfs2/inode.c
43816+++ b/fs/gfs2/inode.c
43817@@ -1490,7 +1490,7 @@ out:
43818
43819 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43820 {
43821- char *s = nd_get_link(nd);
43822+ const char *s = nd_get_link(nd);
43823 if (!IS_ERR(s))
43824 kfree(s);
43825 }
43826diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
43827index 0be5a78..9cfb853 100644
43828--- a/fs/hugetlbfs/inode.c
43829+++ b/fs/hugetlbfs/inode.c
43830@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
43831 .kill_sb = kill_litter_super,
43832 };
43833
43834-static struct vfsmount *hugetlbfs_vfsmount;
43835+struct vfsmount *hugetlbfs_vfsmount;
43836
43837 static int can_do_hugetlb_shm(void)
43838 {
43839diff --git a/fs/inode.c b/fs/inode.c
43840index ee4e66b..0451521 100644
43841--- a/fs/inode.c
43842+++ b/fs/inode.c
43843@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
43844
43845 #ifdef CONFIG_SMP
43846 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43847- static atomic_t shared_last_ino;
43848- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43849+ static atomic_unchecked_t shared_last_ino;
43850+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43851
43852 res = next - LAST_INO_BATCH;
43853 }
43854diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
43855index e513f19..2ab1351 100644
43856--- a/fs/jffs2/erase.c
43857+++ b/fs/jffs2/erase.c
43858@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
43859 struct jffs2_unknown_node marker = {
43860 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43861 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43862- .totlen = cpu_to_je32(c->cleanmarker_size)
43863+ .totlen = cpu_to_je32(c->cleanmarker_size),
43864+ .hdr_crc = cpu_to_je32(0)
43865 };
43866
43867 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43868diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
43869index b09e51d..e482afa 100644
43870--- a/fs/jffs2/wbuf.c
43871+++ b/fs/jffs2/wbuf.c
43872@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
43873 {
43874 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43875 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43876- .totlen = constant_cpu_to_je32(8)
43877+ .totlen = constant_cpu_to_je32(8),
43878+ .hdr_crc = constant_cpu_to_je32(0)
43879 };
43880
43881 /*
43882diff --git a/fs/jfs/super.c b/fs/jfs/super.c
43883index a44eff0..462e07d 100644
43884--- a/fs/jfs/super.c
43885+++ b/fs/jfs/super.c
43886@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
43887
43888 jfs_inode_cachep =
43889 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43890- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43891+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43892 init_once);
43893 if (jfs_inode_cachep == NULL)
43894 return -ENOMEM;
43895diff --git a/fs/libfs.c b/fs/libfs.c
43896index f6d411e..e82a08d 100644
43897--- a/fs/libfs.c
43898+++ b/fs/libfs.c
43899@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
43900
43901 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43902 struct dentry *next;
43903+ char d_name[sizeof(next->d_iname)];
43904+ const unsigned char *name;
43905+
43906 next = list_entry(p, struct dentry, d_u.d_child);
43907 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
43908 if (!simple_positive(next)) {
43909@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
43910
43911 spin_unlock(&next->d_lock);
43912 spin_unlock(&dentry->d_lock);
43913- if (filldir(dirent, next->d_name.name,
43914+ name = next->d_name.name;
43915+ if (name == next->d_iname) {
43916+ memcpy(d_name, name, next->d_name.len);
43917+ name = d_name;
43918+ }
43919+ if (filldir(dirent, name,
43920 next->d_name.len, filp->f_pos,
43921 next->d_inode->i_ino,
43922 dt_type(next->d_inode)) < 0)
43923diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
43924index 8392cb8..80d6193 100644
43925--- a/fs/lockd/clntproc.c
43926+++ b/fs/lockd/clntproc.c
43927@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
43928 /*
43929 * Cookie counter for NLM requests
43930 */
43931-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43932+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43933
43934 void nlmclnt_next_cookie(struct nlm_cookie *c)
43935 {
43936- u32 cookie = atomic_inc_return(&nlm_cookie);
43937+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43938
43939 memcpy(c->data, &cookie, 4);
43940 c->len=4;
43941diff --git a/fs/locks.c b/fs/locks.c
43942index 637694b..f84a121 100644
43943--- a/fs/locks.c
43944+++ b/fs/locks.c
43945@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
43946 return;
43947
43948 if (filp->f_op && filp->f_op->flock) {
43949- struct file_lock fl = {
43950+ struct file_lock flock = {
43951 .fl_pid = current->tgid,
43952 .fl_file = filp,
43953 .fl_flags = FL_FLOCK,
43954 .fl_type = F_UNLCK,
43955 .fl_end = OFFSET_MAX,
43956 };
43957- filp->f_op->flock(filp, F_SETLKW, &fl);
43958- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43959- fl.fl_ops->fl_release_private(&fl);
43960+ filp->f_op->flock(filp, F_SETLKW, &flock);
43961+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43962+ flock.fl_ops->fl_release_private(&flock);
43963 }
43964
43965 lock_flocks();
43966diff --git a/fs/namei.c b/fs/namei.c
43967index 5008f01..90328a7 100644
43968--- a/fs/namei.c
43969+++ b/fs/namei.c
43970@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
43971 if (ret != -EACCES)
43972 return ret;
43973
43974+#ifdef CONFIG_GRKERNSEC
43975+ /* we'll block if we have to log due to a denied capability use */
43976+ if (mask & MAY_NOT_BLOCK)
43977+ return -ECHILD;
43978+#endif
43979+
43980 if (S_ISDIR(inode->i_mode)) {
43981 /* DACs are overridable for directories */
43982- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43983- return 0;
43984 if (!(mask & MAY_WRITE))
43985- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43986+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
43987+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43988 return 0;
43989+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43990+ return 0;
43991 return -EACCES;
43992 }
43993 /*
43994+ * Searching includes executable on directories, else just read.
43995+ */
43996+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43997+ if (mask == MAY_READ)
43998+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
43999+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44000+ return 0;
44001+
44002+ /*
44003 * Read/write DACs are always overridable.
44004 * Executable DACs are overridable when there is
44005 * at least one exec bit set.
44006@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44007 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44008 return 0;
44009
44010- /*
44011- * Searching includes executable on directories, else just read.
44012- */
44013- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44014- if (mask == MAY_READ)
44015- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44016- return 0;
44017-
44018 return -EACCES;
44019 }
44020
44021@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44022 return error;
44023 }
44024
44025+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44026+ dentry->d_inode, dentry, nd->path.mnt)) {
44027+ error = -EACCES;
44028+ *p = ERR_PTR(error); /* no ->put_link(), please */
44029+ path_put(&nd->path);
44030+ return error;
44031+ }
44032+
44033 nd->last_type = LAST_BIND;
44034 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44035 error = PTR_ERR(*p);
44036 if (!IS_ERR(*p)) {
44037- char *s = nd_get_link(nd);
44038+ const char *s = nd_get_link(nd);
44039 error = 0;
44040 if (s)
44041 error = __vfs_follow_link(nd, s);
44042@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44043 if (!err)
44044 err = complete_walk(nd);
44045
44046+ if (!(nd->flags & LOOKUP_PARENT)) {
44047+#ifdef CONFIG_GRKERNSEC
44048+ if (flags & LOOKUP_RCU) {
44049+ if (!err)
44050+ path_put(&nd->path);
44051+ err = -ECHILD;
44052+ } else
44053+#endif
44054+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44055+ if (!err)
44056+ path_put(&nd->path);
44057+ err = -ENOENT;
44058+ }
44059+ }
44060+
44061 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44062 if (!nd->inode->i_op->lookup) {
44063 path_put(&nd->path);
44064@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44065 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44066
44067 if (likely(!retval)) {
44068+ if (*name != '/' && nd->path.dentry && nd->inode) {
44069+#ifdef CONFIG_GRKERNSEC
44070+ if (flags & LOOKUP_RCU)
44071+ return -ECHILD;
44072+#endif
44073+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44074+ return -ENOENT;
44075+ }
44076+
44077 if (unlikely(!audit_dummy_context())) {
44078 if (nd->path.dentry && nd->inode)
44079 audit_inode(name, nd->path.dentry);
44080@@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44081 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44082 return -EPERM;
44083
44084+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44085+ return -EPERM;
44086+ if (gr_handle_rawio(inode))
44087+ return -EPERM;
44088+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44089+ return -EACCES;
44090+
44091 return 0;
44092 }
44093
44094@@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44095 error = complete_walk(nd);
44096 if (error)
44097 return ERR_PTR(error);
44098+#ifdef CONFIG_GRKERNSEC
44099+ if (nd->flags & LOOKUP_RCU) {
44100+ error = -ECHILD;
44101+ goto exit;
44102+ }
44103+#endif
44104+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44105+ error = -ENOENT;
44106+ goto exit;
44107+ }
44108 audit_inode(pathname, nd->path.dentry);
44109 if (open_flag & O_CREAT) {
44110 error = -EISDIR;
44111@@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44112 error = complete_walk(nd);
44113 if (error)
44114 return ERR_PTR(error);
44115+#ifdef CONFIG_GRKERNSEC
44116+ if (nd->flags & LOOKUP_RCU) {
44117+ error = -ECHILD;
44118+ goto exit;
44119+ }
44120+#endif
44121+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44122+ error = -ENOENT;
44123+ goto exit;
44124+ }
44125 audit_inode(pathname, dir);
44126 goto ok;
44127 }
44128@@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44129 error = complete_walk(nd);
44130 if (error)
44131 return ERR_PTR(-ECHILD);
44132+#ifdef CONFIG_GRKERNSEC
44133+ if (nd->flags & LOOKUP_RCU) {
44134+ error = -ECHILD;
44135+ goto exit;
44136+ }
44137+#endif
44138+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44139+ error = -ENOENT;
44140+ goto exit;
44141+ }
44142
44143 error = -ENOTDIR;
44144 if (nd->flags & LOOKUP_DIRECTORY) {
44145@@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44146 /* Negative dentry, just create the file */
44147 if (!dentry->d_inode) {
44148 int mode = op->mode;
44149+
44150+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44151+ error = -EACCES;
44152+ goto exit_mutex_unlock;
44153+ }
44154+
44155 if (!IS_POSIXACL(dir->d_inode))
44156 mode &= ~current_umask();
44157 /*
44158@@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44159 error = vfs_create(dir->d_inode, dentry, mode, nd);
44160 if (error)
44161 goto exit_mutex_unlock;
44162+ else
44163+ gr_handle_create(path->dentry, path->mnt);
44164 mutex_unlock(&dir->d_inode->i_mutex);
44165 dput(nd->path.dentry);
44166 nd->path.dentry = dentry;
44167@@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44168 /*
44169 * It already exists.
44170 */
44171+
44172+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44173+ error = -ENOENT;
44174+ goto exit_mutex_unlock;
44175+ }
44176+
44177+ /* only check if O_CREAT is specified, all other checks need to go
44178+ into may_open */
44179+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44180+ error = -EACCES;
44181+ goto exit_mutex_unlock;
44182+ }
44183+
44184 mutex_unlock(&dir->d_inode->i_mutex);
44185 audit_inode(pathname, path->dentry);
44186
44187@@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44188 *path = nd.path;
44189 return dentry;
44190 eexist:
44191+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44192+ dput(dentry);
44193+ dentry = ERR_PTR(-ENOENT);
44194+ goto fail;
44195+ }
44196 dput(dentry);
44197 dentry = ERR_PTR(-EEXIST);
44198 fail:
44199@@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44200 }
44201 EXPORT_SYMBOL(user_path_create);
44202
44203+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44204+{
44205+ char *tmp = getname(pathname);
44206+ struct dentry *res;
44207+ if (IS_ERR(tmp))
44208+ return ERR_CAST(tmp);
44209+ res = kern_path_create(dfd, tmp, path, is_dir);
44210+ if (IS_ERR(res))
44211+ putname(tmp);
44212+ else
44213+ *to = tmp;
44214+ return res;
44215+}
44216+
44217 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44218 {
44219 int error = may_create(dir, dentry);
44220@@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44221 error = mnt_want_write(path.mnt);
44222 if (error)
44223 goto out_dput;
44224+
44225+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44226+ error = -EPERM;
44227+ goto out_drop_write;
44228+ }
44229+
44230+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44231+ error = -EACCES;
44232+ goto out_drop_write;
44233+ }
44234+
44235 error = security_path_mknod(&path, dentry, mode, dev);
44236 if (error)
44237 goto out_drop_write;
44238@@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44239 }
44240 out_drop_write:
44241 mnt_drop_write(path.mnt);
44242+
44243+ if (!error)
44244+ gr_handle_create(dentry, path.mnt);
44245 out_dput:
44246 dput(dentry);
44247 mutex_unlock(&path.dentry->d_inode->i_mutex);
44248@@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44249 error = mnt_want_write(path.mnt);
44250 if (error)
44251 goto out_dput;
44252+
44253+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44254+ error = -EACCES;
44255+ goto out_drop_write;
44256+ }
44257+
44258 error = security_path_mkdir(&path, dentry, mode);
44259 if (error)
44260 goto out_drop_write;
44261 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44262 out_drop_write:
44263 mnt_drop_write(path.mnt);
44264+
44265+ if (!error)
44266+ gr_handle_create(dentry, path.mnt);
44267 out_dput:
44268 dput(dentry);
44269 mutex_unlock(&path.dentry->d_inode->i_mutex);
44270@@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44271 char * name;
44272 struct dentry *dentry;
44273 struct nameidata nd;
44274+ ino_t saved_ino = 0;
44275+ dev_t saved_dev = 0;
44276
44277 error = user_path_parent(dfd, pathname, &nd, &name);
44278 if (error)
44279@@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44280 error = -ENOENT;
44281 goto exit3;
44282 }
44283+
44284+ saved_ino = dentry->d_inode->i_ino;
44285+ saved_dev = gr_get_dev_from_dentry(dentry);
44286+
44287+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44288+ error = -EACCES;
44289+ goto exit3;
44290+ }
44291+
44292 error = mnt_want_write(nd.path.mnt);
44293 if (error)
44294 goto exit3;
44295@@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44296 if (error)
44297 goto exit4;
44298 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44299+ if (!error && (saved_dev || saved_ino))
44300+ gr_handle_delete(saved_ino, saved_dev);
44301 exit4:
44302 mnt_drop_write(nd.path.mnt);
44303 exit3:
44304@@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44305 struct dentry *dentry;
44306 struct nameidata nd;
44307 struct inode *inode = NULL;
44308+ ino_t saved_ino = 0;
44309+ dev_t saved_dev = 0;
44310
44311 error = user_path_parent(dfd, pathname, &nd, &name);
44312 if (error)
44313@@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44314 if (!inode)
44315 goto slashes;
44316 ihold(inode);
44317+
44318+ if (inode->i_nlink <= 1) {
44319+ saved_ino = inode->i_ino;
44320+ saved_dev = gr_get_dev_from_dentry(dentry);
44321+ }
44322+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44323+ error = -EACCES;
44324+ goto exit2;
44325+ }
44326+
44327 error = mnt_want_write(nd.path.mnt);
44328 if (error)
44329 goto exit2;
44330@@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44331 if (error)
44332 goto exit3;
44333 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44334+ if (!error && (saved_ino || saved_dev))
44335+ gr_handle_delete(saved_ino, saved_dev);
44336 exit3:
44337 mnt_drop_write(nd.path.mnt);
44338 exit2:
44339@@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44340 error = mnt_want_write(path.mnt);
44341 if (error)
44342 goto out_dput;
44343+
44344+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44345+ error = -EACCES;
44346+ goto out_drop_write;
44347+ }
44348+
44349 error = security_path_symlink(&path, dentry, from);
44350 if (error)
44351 goto out_drop_write;
44352 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44353+ if (!error)
44354+ gr_handle_create(dentry, path.mnt);
44355 out_drop_write:
44356 mnt_drop_write(path.mnt);
44357 out_dput:
44358@@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44359 {
44360 struct dentry *new_dentry;
44361 struct path old_path, new_path;
44362+ char *to = NULL;
44363 int how = 0;
44364 int error;
44365
44366@@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44367 if (error)
44368 return error;
44369
44370- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44371+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44372 error = PTR_ERR(new_dentry);
44373 if (IS_ERR(new_dentry))
44374 goto out;
44375@@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44376 error = mnt_want_write(new_path.mnt);
44377 if (error)
44378 goto out_dput;
44379+
44380+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44381+ old_path.dentry->d_inode,
44382+ old_path.dentry->d_inode->i_mode, to)) {
44383+ error = -EACCES;
44384+ goto out_drop_write;
44385+ }
44386+
44387+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44388+ old_path.dentry, old_path.mnt, to)) {
44389+ error = -EACCES;
44390+ goto out_drop_write;
44391+ }
44392+
44393 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44394 if (error)
44395 goto out_drop_write;
44396 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44397+ if (!error)
44398+ gr_handle_create(new_dentry, new_path.mnt);
44399 out_drop_write:
44400 mnt_drop_write(new_path.mnt);
44401 out_dput:
44402+ putname(to);
44403 dput(new_dentry);
44404 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44405 path_put(&new_path);
44406@@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44407 if (new_dentry == trap)
44408 goto exit5;
44409
44410+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44411+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44412+ to);
44413+ if (error)
44414+ goto exit5;
44415+
44416 error = mnt_want_write(oldnd.path.mnt);
44417 if (error)
44418 goto exit5;
44419@@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44420 goto exit6;
44421 error = vfs_rename(old_dir->d_inode, old_dentry,
44422 new_dir->d_inode, new_dentry);
44423+ if (!error)
44424+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44425+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44426 exit6:
44427 mnt_drop_write(oldnd.path.mnt);
44428 exit5:
44429@@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44430
44431 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44432 {
44433+ char tmpbuf[64];
44434+ const char *newlink;
44435 int len;
44436
44437 len = PTR_ERR(link);
44438@@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44439 len = strlen(link);
44440 if (len > (unsigned) buflen)
44441 len = buflen;
44442- if (copy_to_user(buffer, link, len))
44443+
44444+ if (len < sizeof(tmpbuf)) {
44445+ memcpy(tmpbuf, link, len);
44446+ newlink = tmpbuf;
44447+ } else
44448+ newlink = link;
44449+
44450+ if (copy_to_user(buffer, newlink, len))
44451 len = -EFAULT;
44452 out:
44453 return len;
44454diff --git a/fs/namespace.c b/fs/namespace.c
44455index cfc6d44..b4632a5 100644
44456--- a/fs/namespace.c
44457+++ b/fs/namespace.c
44458@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44459 if (!(sb->s_flags & MS_RDONLY))
44460 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44461 up_write(&sb->s_umount);
44462+
44463+ gr_log_remount(mnt->mnt_devname, retval);
44464+
44465 return retval;
44466 }
44467
44468@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44469 br_write_unlock(vfsmount_lock);
44470 up_write(&namespace_sem);
44471 release_mounts(&umount_list);
44472+
44473+ gr_log_unmount(mnt->mnt_devname, retval);
44474+
44475 return retval;
44476 }
44477
44478@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44479 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44480 MS_STRICTATIME);
44481
44482+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44483+ retval = -EPERM;
44484+ goto dput_out;
44485+ }
44486+
44487+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44488+ retval = -EPERM;
44489+ goto dput_out;
44490+ }
44491+
44492 if (flags & MS_REMOUNT)
44493 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44494 data_page);
44495@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44496 dev_name, data_page);
44497 dput_out:
44498 path_put(&path);
44499+
44500+ gr_log_mount(dev_name, dir_name, retval);
44501+
44502 return retval;
44503 }
44504
44505@@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44506 if (error)
44507 goto out2;
44508
44509+ if (gr_handle_chroot_pivot()) {
44510+ error = -EPERM;
44511+ goto out2;
44512+ }
44513+
44514 get_fs_root(current->fs, &root);
44515 error = lock_mount(&old);
44516 if (error)
44517diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44518index 3db6b82..a57597e 100644
44519--- a/fs/nfs/blocklayout/blocklayout.c
44520+++ b/fs/nfs/blocklayout/blocklayout.c
44521@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44522 */
44523 struct parallel_io {
44524 struct kref refcnt;
44525- struct rpc_call_ops call_ops;
44526+ rpc_call_ops_no_const call_ops;
44527 void (*pnfs_callback) (void *data);
44528 void *data;
44529 };
44530diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44531index 50a15fa..ca113f9 100644
44532--- a/fs/nfs/inode.c
44533+++ b/fs/nfs/inode.c
44534@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44535 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44536 nfsi->attrtimeo_timestamp = jiffies;
44537
44538- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44539+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44540 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44541 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44542 else
44543@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44544 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44545 }
44546
44547-static atomic_long_t nfs_attr_generation_counter;
44548+static atomic_long_unchecked_t nfs_attr_generation_counter;
44549
44550 static unsigned long nfs_read_attr_generation_counter(void)
44551 {
44552- return atomic_long_read(&nfs_attr_generation_counter);
44553+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44554 }
44555
44556 unsigned long nfs_inc_attr_generation_counter(void)
44557 {
44558- return atomic_long_inc_return(&nfs_attr_generation_counter);
44559+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44560 }
44561
44562 void nfs_fattr_init(struct nfs_fattr *fattr)
44563diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44564index 7a2e442..8e544cc 100644
44565--- a/fs/nfsd/vfs.c
44566+++ b/fs/nfsd/vfs.c
44567@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44568 } else {
44569 oldfs = get_fs();
44570 set_fs(KERNEL_DS);
44571- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44572+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44573 set_fs(oldfs);
44574 }
44575
44576@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44577
44578 /* Write the data. */
44579 oldfs = get_fs(); set_fs(KERNEL_DS);
44580- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44581+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44582 set_fs(oldfs);
44583 if (host_err < 0)
44584 goto out_nfserr;
44585@@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44586 */
44587
44588 oldfs = get_fs(); set_fs(KERNEL_DS);
44589- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44590+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44591 set_fs(oldfs);
44592
44593 if (host_err < 0)
44594diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
44595index 9fde1c0..14e8827 100644
44596--- a/fs/notify/fanotify/fanotify_user.c
44597+++ b/fs/notify/fanotify/fanotify_user.c
44598@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
44599 goto out_close_fd;
44600
44601 ret = -EFAULT;
44602- if (copy_to_user(buf, &fanotify_event_metadata,
44603+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44604+ copy_to_user(buf, &fanotify_event_metadata,
44605 fanotify_event_metadata.event_len))
44606 goto out_kill_access_response;
44607
44608diff --git a/fs/notify/notification.c b/fs/notify/notification.c
44609index ee18815..7aa5d01 100644
44610--- a/fs/notify/notification.c
44611+++ b/fs/notify/notification.c
44612@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
44613 * get set to 0 so it will never get 'freed'
44614 */
44615 static struct fsnotify_event *q_overflow_event;
44616-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44617+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44618
44619 /**
44620 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44621@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44622 */
44623 u32 fsnotify_get_cookie(void)
44624 {
44625- return atomic_inc_return(&fsnotify_sync_cookie);
44626+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44627 }
44628 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44629
44630diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
44631index 99e3610..02c1068 100644
44632--- a/fs/ntfs/dir.c
44633+++ b/fs/ntfs/dir.c
44634@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44635 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44636 ~(s64)(ndir->itype.index.block_size - 1)));
44637 /* Bounds checks. */
44638- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44639+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44640 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44641 "inode 0x%lx or driver bug.", vdir->i_ino);
44642 goto err_out;
44643diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
44644index c587e2d..3641eaa 100644
44645--- a/fs/ntfs/file.c
44646+++ b/fs/ntfs/file.c
44647@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
44648 #endif /* NTFS_RW */
44649 };
44650
44651-const struct file_operations ntfs_empty_file_ops = {};
44652+const struct file_operations ntfs_empty_file_ops __read_only;
44653
44654-const struct inode_operations ntfs_empty_inode_ops = {};
44655+const struct inode_operations ntfs_empty_inode_ops __read_only;
44656diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
44657index 210c352..a174f83 100644
44658--- a/fs/ocfs2/localalloc.c
44659+++ b/fs/ocfs2/localalloc.c
44660@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
44661 goto bail;
44662 }
44663
44664- atomic_inc(&osb->alloc_stats.moves);
44665+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44666
44667 bail:
44668 if (handle)
44669diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
44670index d355e6e..578d905 100644
44671--- a/fs/ocfs2/ocfs2.h
44672+++ b/fs/ocfs2/ocfs2.h
44673@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44674
44675 struct ocfs2_alloc_stats
44676 {
44677- atomic_t moves;
44678- atomic_t local_data;
44679- atomic_t bitmap_data;
44680- atomic_t bg_allocs;
44681- atomic_t bg_extends;
44682+ atomic_unchecked_t moves;
44683+ atomic_unchecked_t local_data;
44684+ atomic_unchecked_t bitmap_data;
44685+ atomic_unchecked_t bg_allocs;
44686+ atomic_unchecked_t bg_extends;
44687 };
44688
44689 enum ocfs2_local_alloc_state
44690diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
44691index ba5d97e..c77db25 100644
44692--- a/fs/ocfs2/suballoc.c
44693+++ b/fs/ocfs2/suballoc.c
44694@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
44695 mlog_errno(status);
44696 goto bail;
44697 }
44698- atomic_inc(&osb->alloc_stats.bg_extends);
44699+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44700
44701 /* You should never ask for this much metadata */
44702 BUG_ON(bits_wanted >
44703@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
44704 mlog_errno(status);
44705 goto bail;
44706 }
44707- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44708+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44709
44710 *suballoc_loc = res.sr_bg_blkno;
44711 *suballoc_bit_start = res.sr_bit_offset;
44712@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
44713 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44714 res->sr_bits);
44715
44716- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44717+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44718
44719 BUG_ON(res->sr_bits != 1);
44720
44721@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
44722 mlog_errno(status);
44723 goto bail;
44724 }
44725- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44726+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44727
44728 BUG_ON(res.sr_bits != 1);
44729
44730@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44731 cluster_start,
44732 num_clusters);
44733 if (!status)
44734- atomic_inc(&osb->alloc_stats.local_data);
44735+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44736 } else {
44737 if (min_clusters > (osb->bitmap_cpg - 1)) {
44738 /* The only paths asking for contiguousness
44739@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44740 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44741 res.sr_bg_blkno,
44742 res.sr_bit_offset);
44743- atomic_inc(&osb->alloc_stats.bitmap_data);
44744+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44745 *num_clusters = res.sr_bits;
44746 }
44747 }
44748diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
44749index 4994f8b..eaab8eb 100644
44750--- a/fs/ocfs2/super.c
44751+++ b/fs/ocfs2/super.c
44752@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
44753 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44754 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44755 "Stats",
44756- atomic_read(&osb->alloc_stats.bitmap_data),
44757- atomic_read(&osb->alloc_stats.local_data),
44758- atomic_read(&osb->alloc_stats.bg_allocs),
44759- atomic_read(&osb->alloc_stats.moves),
44760- atomic_read(&osb->alloc_stats.bg_extends));
44761+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44762+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44763+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44764+ atomic_read_unchecked(&osb->alloc_stats.moves),
44765+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44766
44767 out += snprintf(buf + out, len - out,
44768 "%10s => State: %u Descriptor: %llu Size: %u bits "
44769@@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
44770 spin_lock_init(&osb->osb_xattr_lock);
44771 ocfs2_init_steal_slots(osb);
44772
44773- atomic_set(&osb->alloc_stats.moves, 0);
44774- atomic_set(&osb->alloc_stats.local_data, 0);
44775- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44776- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44777- atomic_set(&osb->alloc_stats.bg_extends, 0);
44778+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44779+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44780+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44781+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44782+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44783
44784 /* Copy the blockcheck stats from the superblock probe */
44785 osb->osb_ecc_stats = *stats;
44786diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
44787index 5d22872..523db20 100644
44788--- a/fs/ocfs2/symlink.c
44789+++ b/fs/ocfs2/symlink.c
44790@@ -142,7 +142,7 @@ bail:
44791
44792 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44793 {
44794- char *link = nd_get_link(nd);
44795+ const char *link = nd_get_link(nd);
44796 if (!IS_ERR(link))
44797 kfree(link);
44798 }
44799diff --git a/fs/open.c b/fs/open.c
44800index 22c41b5..695cb17 100644
44801--- a/fs/open.c
44802+++ b/fs/open.c
44803@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
44804 error = locks_verify_truncate(inode, NULL, length);
44805 if (!error)
44806 error = security_path_truncate(&path);
44807+
44808+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44809+ error = -EACCES;
44810+
44811 if (!error)
44812 error = do_truncate(path.dentry, length, 0, NULL);
44813
44814@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
44815 if (__mnt_is_readonly(path.mnt))
44816 res = -EROFS;
44817
44818+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44819+ res = -EACCES;
44820+
44821 out_path_release:
44822 path_put(&path);
44823 out:
44824@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
44825 if (error)
44826 goto dput_and_out;
44827
44828+ gr_log_chdir(path.dentry, path.mnt);
44829+
44830 set_fs_pwd(current->fs, &path);
44831
44832 dput_and_out:
44833@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
44834 goto out_putf;
44835
44836 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
44837+
44838+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44839+ error = -EPERM;
44840+
44841+ if (!error)
44842+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44843+
44844 if (!error)
44845 set_fs_pwd(current->fs, &file->f_path);
44846 out_putf:
44847@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
44848 if (error)
44849 goto dput_and_out;
44850
44851+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44852+ goto dput_and_out;
44853+
44854 set_fs_root(current->fs, &path);
44855+
44856+ gr_handle_chroot_chdir(&path);
44857+
44858 error = 0;
44859 dput_and_out:
44860 path_put(&path);
44861@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
44862 if (error)
44863 return error;
44864 mutex_lock(&inode->i_mutex);
44865+
44866+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
44867+ error = -EACCES;
44868+ goto out_unlock;
44869+ }
44870+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
44871+ error = -EACCES;
44872+ goto out_unlock;
44873+ }
44874+
44875 error = security_path_chmod(path->dentry, path->mnt, mode);
44876 if (error)
44877 goto out_unlock;
44878@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
44879 int error;
44880 struct iattr newattrs;
44881
44882+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
44883+ return -EACCES;
44884+
44885 newattrs.ia_valid = ATTR_CTIME;
44886 if (user != (uid_t) -1) {
44887 newattrs.ia_valid |= ATTR_UID;
44888diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
44889index 6296b40..417c00f 100644
44890--- a/fs/partitions/efi.c
44891+++ b/fs/partitions/efi.c
44892@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
44893 if (!gpt)
44894 return NULL;
44895
44896+ if (!le32_to_cpu(gpt->num_partition_entries))
44897+ return NULL;
44898+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
44899+ if (!pte)
44900+ return NULL;
44901+
44902 count = le32_to_cpu(gpt->num_partition_entries) *
44903 le32_to_cpu(gpt->sizeof_partition_entry);
44904- if (!count)
44905- return NULL;
44906- pte = kzalloc(count, GFP_KERNEL);
44907- if (!pte)
44908- return NULL;
44909-
44910 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
44911 (u8 *) pte,
44912 count) < count) {
44913diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
44914index bd8ae78..539d250 100644
44915--- a/fs/partitions/ldm.c
44916+++ b/fs/partitions/ldm.c
44917@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
44918 goto found;
44919 }
44920
44921- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44922+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44923 if (!f) {
44924 ldm_crit ("Out of memory.");
44925 return false;
44926diff --git a/fs/pipe.c b/fs/pipe.c
44927index 4065f07..68c0706 100644
44928--- a/fs/pipe.c
44929+++ b/fs/pipe.c
44930@@ -420,9 +420,9 @@ redo:
44931 }
44932 if (bufs) /* More to do? */
44933 continue;
44934- if (!pipe->writers)
44935+ if (!atomic_read(&pipe->writers))
44936 break;
44937- if (!pipe->waiting_writers) {
44938+ if (!atomic_read(&pipe->waiting_writers)) {
44939 /* syscall merging: Usually we must not sleep
44940 * if O_NONBLOCK is set, or if we got some data.
44941 * But if a writer sleeps in kernel space, then
44942@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
44943 mutex_lock(&inode->i_mutex);
44944 pipe = inode->i_pipe;
44945
44946- if (!pipe->readers) {
44947+ if (!atomic_read(&pipe->readers)) {
44948 send_sig(SIGPIPE, current, 0);
44949 ret = -EPIPE;
44950 goto out;
44951@@ -530,7 +530,7 @@ redo1:
44952 for (;;) {
44953 int bufs;
44954
44955- if (!pipe->readers) {
44956+ if (!atomic_read(&pipe->readers)) {
44957 send_sig(SIGPIPE, current, 0);
44958 if (!ret)
44959 ret = -EPIPE;
44960@@ -616,9 +616,9 @@ redo2:
44961 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44962 do_wakeup = 0;
44963 }
44964- pipe->waiting_writers++;
44965+ atomic_inc(&pipe->waiting_writers);
44966 pipe_wait(pipe);
44967- pipe->waiting_writers--;
44968+ atomic_dec(&pipe->waiting_writers);
44969 }
44970 out:
44971 mutex_unlock(&inode->i_mutex);
44972@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
44973 mask = 0;
44974 if (filp->f_mode & FMODE_READ) {
44975 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44976- if (!pipe->writers && filp->f_version != pipe->w_counter)
44977+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44978 mask |= POLLHUP;
44979 }
44980
44981@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
44982 * Most Unices do not set POLLERR for FIFOs but on Linux they
44983 * behave exactly like pipes for poll().
44984 */
44985- if (!pipe->readers)
44986+ if (!atomic_read(&pipe->readers))
44987 mask |= POLLERR;
44988 }
44989
44990@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
44991
44992 mutex_lock(&inode->i_mutex);
44993 pipe = inode->i_pipe;
44994- pipe->readers -= decr;
44995- pipe->writers -= decw;
44996+ atomic_sub(decr, &pipe->readers);
44997+ atomic_sub(decw, &pipe->writers);
44998
44999- if (!pipe->readers && !pipe->writers) {
45000+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45001 free_pipe_info(inode);
45002 } else {
45003 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45004@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45005
45006 if (inode->i_pipe) {
45007 ret = 0;
45008- inode->i_pipe->readers++;
45009+ atomic_inc(&inode->i_pipe->readers);
45010 }
45011
45012 mutex_unlock(&inode->i_mutex);
45013@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45014
45015 if (inode->i_pipe) {
45016 ret = 0;
45017- inode->i_pipe->writers++;
45018+ atomic_inc(&inode->i_pipe->writers);
45019 }
45020
45021 mutex_unlock(&inode->i_mutex);
45022@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45023 if (inode->i_pipe) {
45024 ret = 0;
45025 if (filp->f_mode & FMODE_READ)
45026- inode->i_pipe->readers++;
45027+ atomic_inc(&inode->i_pipe->readers);
45028 if (filp->f_mode & FMODE_WRITE)
45029- inode->i_pipe->writers++;
45030+ atomic_inc(&inode->i_pipe->writers);
45031 }
45032
45033 mutex_unlock(&inode->i_mutex);
45034@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45035 inode->i_pipe = NULL;
45036 }
45037
45038-static struct vfsmount *pipe_mnt __read_mostly;
45039+struct vfsmount *pipe_mnt __read_mostly;
45040
45041 /*
45042 * pipefs_dname() is called from d_path().
45043@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45044 goto fail_iput;
45045 inode->i_pipe = pipe;
45046
45047- pipe->readers = pipe->writers = 1;
45048+ atomic_set(&pipe->readers, 1);
45049+ atomic_set(&pipe->writers, 1);
45050 inode->i_fop = &rdwr_pipefifo_fops;
45051
45052 /*
45053diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45054index 15af622..0e9f4467 100644
45055--- a/fs/proc/Kconfig
45056+++ b/fs/proc/Kconfig
45057@@ -30,12 +30,12 @@ config PROC_FS
45058
45059 config PROC_KCORE
45060 bool "/proc/kcore support" if !ARM
45061- depends on PROC_FS && MMU
45062+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45063
45064 config PROC_VMCORE
45065 bool "/proc/vmcore support"
45066- depends on PROC_FS && CRASH_DUMP
45067- default y
45068+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45069+ default n
45070 help
45071 Exports the dump image of crashed kernel in ELF format.
45072
45073@@ -59,8 +59,8 @@ config PROC_SYSCTL
45074 limited in memory.
45075
45076 config PROC_PAGE_MONITOR
45077- default y
45078- depends on PROC_FS && MMU
45079+ default n
45080+ depends on PROC_FS && MMU && !GRKERNSEC
45081 bool "Enable /proc page monitoring" if EXPERT
45082 help
45083 Various /proc files exist to monitor process memory utilization:
45084diff --git a/fs/proc/array.c b/fs/proc/array.c
45085index 3a1dafd..d41fc37 100644
45086--- a/fs/proc/array.c
45087+++ b/fs/proc/array.c
45088@@ -60,6 +60,7 @@
45089 #include <linux/tty.h>
45090 #include <linux/string.h>
45091 #include <linux/mman.h>
45092+#include <linux/grsecurity.h>
45093 #include <linux/proc_fs.h>
45094 #include <linux/ioport.h>
45095 #include <linux/uaccess.h>
45096@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45097 seq_putc(m, '\n');
45098 }
45099
45100+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45101+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45102+{
45103+ if (p->mm)
45104+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45105+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45106+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45107+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45108+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45109+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45110+ else
45111+ seq_printf(m, "PaX:\t-----\n");
45112+}
45113+#endif
45114+
45115 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45116 struct pid *pid, struct task_struct *task)
45117 {
45118@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45119 task_cpus_allowed(m, task);
45120 cpuset_task_status_allowed(m, task);
45121 task_context_switch_counts(m, task);
45122+
45123+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45124+ task_pax(m, task);
45125+#endif
45126+
45127+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45128+ task_grsec_rbac(m, task);
45129+#endif
45130+
45131 return 0;
45132 }
45133
45134+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45135+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45136+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45137+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45138+#endif
45139+
45140 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45141 struct pid *pid, struct task_struct *task, int whole)
45142 {
45143@@ -449,6 +480,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45144 gtime = task->gtime;
45145 }
45146
45147+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45148+ if (PAX_RAND_FLAGS(mm)) {
45149+ eip = 0;
45150+ esp = 0;
45151+ wchan = 0;
45152+ }
45153+#endif
45154+#ifdef CONFIG_GRKERNSEC_HIDESYM
45155+ wchan = 0;
45156+ eip =0;
45157+ esp =0;
45158+#endif
45159+
45160 /* scale priority and nice values from timeslices to -20..20 */
45161 /* to make it look like a "normal" Unix priority/nice value */
45162 priority = task_prio(task);
45163@@ -489,9 +533,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45164 vsize,
45165 mm ? get_mm_rss(mm) : 0,
45166 rsslim,
45167+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45168+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45169+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45170+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45171+#else
45172 mm ? (permitted ? mm->start_code : 1) : 0,
45173 mm ? (permitted ? mm->end_code : 1) : 0,
45174 (permitted && mm) ? mm->start_stack : 0,
45175+#endif
45176 esp,
45177 eip,
45178 /* The signal information here is obsolete.
45179@@ -544,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45180
45181 return 0;
45182 }
45183+
45184+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45185+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45186+{
45187+ u32 curr_ip = 0;
45188+ unsigned long flags;
45189+
45190+ if (lock_task_sighand(task, &flags)) {
45191+ curr_ip = task->signal->curr_ip;
45192+ unlock_task_sighand(task, &flags);
45193+ }
45194+
45195+ return sprintf(buffer, "%pI4\n", &curr_ip);
45196+}
45197+#endif
45198diff --git a/fs/proc/base.c b/fs/proc/base.c
45199index 1fc1dca..813fd0b 100644
45200--- a/fs/proc/base.c
45201+++ b/fs/proc/base.c
45202@@ -107,6 +107,22 @@ struct pid_entry {
45203 union proc_op op;
45204 };
45205
45206+struct getdents_callback {
45207+ struct linux_dirent __user * current_dir;
45208+ struct linux_dirent __user * previous;
45209+ struct file * file;
45210+ int count;
45211+ int error;
45212+};
45213+
45214+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45215+ loff_t offset, u64 ino, unsigned int d_type)
45216+{
45217+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45218+ buf->error = -EINVAL;
45219+ return 0;
45220+}
45221+
45222 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45223 .name = (NAME), \
45224 .len = sizeof(NAME) - 1, \
45225@@ -204,10 +220,12 @@ static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45226 return ERR_PTR(err);
45227
45228 mm = get_task_mm(task);
45229- if (mm && mm != current->mm &&
45230- !ptrace_may_access(task, mode)) {
45231- mmput(mm);
45232- mm = ERR_PTR(-EACCES);
45233+ if (mm) {
45234+ if ((mm != current->mm && !ptrace_may_access(task, mode)) ||
45235+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task)))) {
45236+ mmput(mm);
45237+ mm = ERR_PTR(-EACCES);
45238+ }
45239 }
45240 mutex_unlock(&task->signal->cred_guard_mutex);
45241
45242@@ -229,6 +247,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45243 if (!mm->arg_end)
45244 goto out_mm; /* Shh! No looking before we're done */
45245
45246+ if (gr_acl_handle_procpidmem(task))
45247+ goto out_mm;
45248+
45249 len = mm->arg_end - mm->arg_start;
45250
45251 if (len > PAGE_SIZE)
45252@@ -256,12 +277,28 @@ out:
45253 return res;
45254 }
45255
45256+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45257+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45258+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45259+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45260+#endif
45261+
45262 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45263 {
45264 struct mm_struct *mm = mm_for_maps(task);
45265 int res = PTR_ERR(mm);
45266 if (mm && !IS_ERR(mm)) {
45267 unsigned int nwords = 0;
45268+
45269+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45270+ /* allow if we're currently ptracing this task */
45271+ if (PAX_RAND_FLAGS(mm) &&
45272+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45273+ mmput(mm);
45274+ return 0;
45275+ }
45276+#endif
45277+
45278 do {
45279 nwords += 2;
45280 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45281@@ -275,7 +312,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45282 }
45283
45284
45285-#ifdef CONFIG_KALLSYMS
45286+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45287 /*
45288 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45289 * Returns the resolved symbol. If that fails, simply return the address.
45290@@ -314,7 +351,7 @@ static void unlock_trace(struct task_struct *task)
45291 mutex_unlock(&task->signal->cred_guard_mutex);
45292 }
45293
45294-#ifdef CONFIG_STACKTRACE
45295+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45296
45297 #define MAX_STACK_TRACE_DEPTH 64
45298
45299@@ -505,7 +542,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45300 return count;
45301 }
45302
45303-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45304+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45305 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45306 {
45307 long nr;
45308@@ -534,7 +571,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45309 /************************************************************************/
45310
45311 /* permission checks */
45312-static int proc_fd_access_allowed(struct inode *inode)
45313+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45314 {
45315 struct task_struct *task;
45316 int allowed = 0;
45317@@ -544,7 +581,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45318 */
45319 task = get_proc_task(inode);
45320 if (task) {
45321- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45322+ if (log)
45323+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45324+ else
45325+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45326 put_task_struct(task);
45327 }
45328 return allowed;
45329@@ -826,6 +866,10 @@ static ssize_t mem_read(struct file * file, char __user * buf,
45330 return ret;
45331 }
45332
45333+#define mem_write NULL
45334+
45335+#ifndef mem_write
45336+/* They were right the first time */
45337 static ssize_t mem_write(struct file * file, const char __user *buf,
45338 size_t count, loff_t *ppos)
45339 {
45340@@ -866,6 +910,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
45341 free_page((unsigned long) page);
45342 return copied;
45343 }
45344+#endif
45345
45346 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45347 {
45348@@ -911,6 +956,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45349 if (!task)
45350 goto out_no_task;
45351
45352+ if (gr_acl_handle_procpidmem(task))
45353+ goto out;
45354+
45355 ret = -ENOMEM;
45356 page = (char *)__get_free_page(GFP_TEMPORARY);
45357 if (!page)
45358@@ -1533,7 +1581,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45359 path_put(&nd->path);
45360
45361 /* Are we allowed to snoop on the tasks file descriptors? */
45362- if (!proc_fd_access_allowed(inode))
45363+ if (!proc_fd_access_allowed(inode,0))
45364 goto out;
45365
45366 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45367@@ -1572,8 +1620,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45368 struct path path;
45369
45370 /* Are we allowed to snoop on the tasks file descriptors? */
45371- if (!proc_fd_access_allowed(inode))
45372- goto out;
45373+ /* logging this is needed for learning on chromium to work properly,
45374+ but we don't want to flood the logs from 'ps' which does a readlink
45375+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45376+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45377+ */
45378+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45379+ if (!proc_fd_access_allowed(inode,0))
45380+ goto out;
45381+ } else {
45382+ if (!proc_fd_access_allowed(inode,1))
45383+ goto out;
45384+ }
45385
45386 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45387 if (error)
45388@@ -1638,7 +1696,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45389 rcu_read_lock();
45390 cred = __task_cred(task);
45391 inode->i_uid = cred->euid;
45392+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45393+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45394+#else
45395 inode->i_gid = cred->egid;
45396+#endif
45397 rcu_read_unlock();
45398 }
45399 security_task_to_inode(task, inode);
45400@@ -1656,6 +1718,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45401 struct inode *inode = dentry->d_inode;
45402 struct task_struct *task;
45403 const struct cred *cred;
45404+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45405+ const struct cred *tmpcred = current_cred();
45406+#endif
45407
45408 generic_fillattr(inode, stat);
45409
45410@@ -1663,13 +1728,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45411 stat->uid = 0;
45412 stat->gid = 0;
45413 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45414+
45415+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45416+ rcu_read_unlock();
45417+ return -ENOENT;
45418+ }
45419+
45420 if (task) {
45421+ cred = __task_cred(task);
45422+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45423+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45424+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45425+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45426+#endif
45427+ ) {
45428+#endif
45429 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45430+#ifdef CONFIG_GRKERNSEC_PROC_USER
45431+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45432+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45433+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45434+#endif
45435 task_dumpable(task)) {
45436- cred = __task_cred(task);
45437 stat->uid = cred->euid;
45438+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45439+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45440+#else
45441 stat->gid = cred->egid;
45442+#endif
45443 }
45444+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45445+ } else {
45446+ rcu_read_unlock();
45447+ return -ENOENT;
45448+ }
45449+#endif
45450 }
45451 rcu_read_unlock();
45452 return 0;
45453@@ -1706,11 +1799,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45454
45455 if (task) {
45456 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45457+#ifdef CONFIG_GRKERNSEC_PROC_USER
45458+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45459+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45460+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45461+#endif
45462 task_dumpable(task)) {
45463 rcu_read_lock();
45464 cred = __task_cred(task);
45465 inode->i_uid = cred->euid;
45466+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45467+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45468+#else
45469 inode->i_gid = cred->egid;
45470+#endif
45471 rcu_read_unlock();
45472 } else {
45473 inode->i_uid = 0;
45474@@ -1828,7 +1930,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45475 int fd = proc_fd(inode);
45476
45477 if (task) {
45478- files = get_files_struct(task);
45479+ if (!gr_acl_handle_procpidmem(task))
45480+ files = get_files_struct(task);
45481 put_task_struct(task);
45482 }
45483 if (files) {
45484@@ -2096,11 +2199,21 @@ static const struct file_operations proc_fd_operations = {
45485 */
45486 static int proc_fd_permission(struct inode *inode, int mask)
45487 {
45488+ struct task_struct *task;
45489 int rv = generic_permission(inode, mask);
45490- if (rv == 0)
45491- return 0;
45492+
45493 if (task_pid(current) == proc_pid(inode))
45494 rv = 0;
45495+
45496+ task = get_proc_task(inode);
45497+ if (task == NULL)
45498+ return rv;
45499+
45500+ if (gr_acl_handle_procpidmem(task))
45501+ rv = -EACCES;
45502+
45503+ put_task_struct(task);
45504+
45505 return rv;
45506 }
45507
45508@@ -2210,6 +2323,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45509 if (!task)
45510 goto out_no_task;
45511
45512+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45513+ goto out;
45514+
45515 /*
45516 * Yes, it does not scale. And it should not. Don't add
45517 * new entries into /proc/<tgid>/ without very good reasons.
45518@@ -2254,6 +2370,9 @@ static int proc_pident_readdir(struct file *filp,
45519 if (!task)
45520 goto out_no_task;
45521
45522+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45523+ goto out;
45524+
45525 ret = 0;
45526 i = filp->f_pos;
45527 switch (i) {
45528@@ -2524,7 +2643,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45529 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45530 void *cookie)
45531 {
45532- char *s = nd_get_link(nd);
45533+ const char *s = nd_get_link(nd);
45534 if (!IS_ERR(s))
45535 __putname(s);
45536 }
45537@@ -2722,7 +2841,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45538 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45539 #endif
45540 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45541-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45542+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45543 INF("syscall", S_IRUGO, proc_pid_syscall),
45544 #endif
45545 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45546@@ -2747,10 +2866,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45547 #ifdef CONFIG_SECURITY
45548 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45549 #endif
45550-#ifdef CONFIG_KALLSYMS
45551+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45552 INF("wchan", S_IRUGO, proc_pid_wchan),
45553 #endif
45554-#ifdef CONFIG_STACKTRACE
45555+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45556 ONE("stack", S_IRUGO, proc_pid_stack),
45557 #endif
45558 #ifdef CONFIG_SCHEDSTATS
45559@@ -2784,6 +2903,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45560 #ifdef CONFIG_HARDWALL
45561 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45562 #endif
45563+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45564+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45565+#endif
45566 };
45567
45568 static int proc_tgid_base_readdir(struct file * filp,
45569@@ -2909,7 +3031,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45570 if (!inode)
45571 goto out;
45572
45573+#ifdef CONFIG_GRKERNSEC_PROC_USER
45574+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45575+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45576+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45577+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45578+#else
45579 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45580+#endif
45581 inode->i_op = &proc_tgid_base_inode_operations;
45582 inode->i_fop = &proc_tgid_base_operations;
45583 inode->i_flags|=S_IMMUTABLE;
45584@@ -2951,7 +3080,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45585 if (!task)
45586 goto out;
45587
45588+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45589+ goto out_put_task;
45590+
45591 result = proc_pid_instantiate(dir, dentry, task, NULL);
45592+out_put_task:
45593 put_task_struct(task);
45594 out:
45595 return result;
45596@@ -3016,6 +3149,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45597 {
45598 unsigned int nr;
45599 struct task_struct *reaper;
45600+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45601+ const struct cred *tmpcred = current_cred();
45602+ const struct cred *itercred;
45603+#endif
45604+ filldir_t __filldir = filldir;
45605 struct tgid_iter iter;
45606 struct pid_namespace *ns;
45607
45608@@ -3039,8 +3177,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45609 for (iter = next_tgid(ns, iter);
45610 iter.task;
45611 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45612+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45613+ rcu_read_lock();
45614+ itercred = __task_cred(iter.task);
45615+#endif
45616+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45617+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45618+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45619+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45620+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45621+#endif
45622+ )
45623+#endif
45624+ )
45625+ __filldir = &gr_fake_filldir;
45626+ else
45627+ __filldir = filldir;
45628+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45629+ rcu_read_unlock();
45630+#endif
45631 filp->f_pos = iter.tgid + TGID_OFFSET;
45632- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45633+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45634 put_task_struct(iter.task);
45635 goto out;
45636 }
45637@@ -3068,7 +3225,7 @@ static const struct pid_entry tid_base_stuff[] = {
45638 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45639 #endif
45640 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45641-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45642+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45643 INF("syscall", S_IRUGO, proc_pid_syscall),
45644 #endif
45645 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45646@@ -3092,10 +3249,10 @@ static const struct pid_entry tid_base_stuff[] = {
45647 #ifdef CONFIG_SECURITY
45648 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45649 #endif
45650-#ifdef CONFIG_KALLSYMS
45651+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45652 INF("wchan", S_IRUGO, proc_pid_wchan),
45653 #endif
45654-#ifdef CONFIG_STACKTRACE
45655+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45656 ONE("stack", S_IRUGO, proc_pid_stack),
45657 #endif
45658 #ifdef CONFIG_SCHEDSTATS
45659diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
45660index 82676e3..5f8518a 100644
45661--- a/fs/proc/cmdline.c
45662+++ b/fs/proc/cmdline.c
45663@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
45664
45665 static int __init proc_cmdline_init(void)
45666 {
45667+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45668+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45669+#else
45670 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45671+#endif
45672 return 0;
45673 }
45674 module_init(proc_cmdline_init);
45675diff --git a/fs/proc/devices.c b/fs/proc/devices.c
45676index b143471..bb105e5 100644
45677--- a/fs/proc/devices.c
45678+++ b/fs/proc/devices.c
45679@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
45680
45681 static int __init proc_devices_init(void)
45682 {
45683+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45684+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45685+#else
45686 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45687+#endif
45688 return 0;
45689 }
45690 module_init(proc_devices_init);
45691diff --git a/fs/proc/inode.c b/fs/proc/inode.c
45692index 7737c54..7172574 100644
45693--- a/fs/proc/inode.c
45694+++ b/fs/proc/inode.c
45695@@ -18,12 +18,18 @@
45696 #include <linux/module.h>
45697 #include <linux/sysctl.h>
45698 #include <linux/slab.h>
45699+#include <linux/grsecurity.h>
45700
45701 #include <asm/system.h>
45702 #include <asm/uaccess.h>
45703
45704 #include "internal.h"
45705
45706+#ifdef CONFIG_PROC_SYSCTL
45707+extern const struct inode_operations proc_sys_inode_operations;
45708+extern const struct inode_operations proc_sys_dir_operations;
45709+#endif
45710+
45711 static void proc_evict_inode(struct inode *inode)
45712 {
45713 struct proc_dir_entry *de;
45714@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
45715 ns_ops = PROC_I(inode)->ns_ops;
45716 if (ns_ops && ns_ops->put)
45717 ns_ops->put(PROC_I(inode)->ns);
45718+
45719+#ifdef CONFIG_PROC_SYSCTL
45720+ if (inode->i_op == &proc_sys_inode_operations ||
45721+ inode->i_op == &proc_sys_dir_operations)
45722+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45723+#endif
45724+
45725 }
45726
45727 static struct kmem_cache * proc_inode_cachep;
45728@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
45729 if (de->mode) {
45730 inode->i_mode = de->mode;
45731 inode->i_uid = de->uid;
45732+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45733+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45734+#else
45735 inode->i_gid = de->gid;
45736+#endif
45737 }
45738 if (de->size)
45739 inode->i_size = de->size;
45740diff --git a/fs/proc/internal.h b/fs/proc/internal.h
45741index 7838e5c..ff92cbc 100644
45742--- a/fs/proc/internal.h
45743+++ b/fs/proc/internal.h
45744@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45745 struct pid *pid, struct task_struct *task);
45746 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45747 struct pid *pid, struct task_struct *task);
45748+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45749+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45750+#endif
45751 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45752
45753 extern const struct file_operations proc_maps_operations;
45754diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
45755index d245cb2..f4e8498 100644
45756--- a/fs/proc/kcore.c
45757+++ b/fs/proc/kcore.c
45758@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45759 * the addresses in the elf_phdr on our list.
45760 */
45761 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45762- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45763+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45764+ if (tsz > buflen)
45765 tsz = buflen;
45766-
45767+
45768 while (buflen) {
45769 struct kcore_list *m;
45770
45771@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45772 kfree(elf_buf);
45773 } else {
45774 if (kern_addr_valid(start)) {
45775- unsigned long n;
45776+ char *elf_buf;
45777+ mm_segment_t oldfs;
45778
45779- n = copy_to_user(buffer, (char *)start, tsz);
45780- /*
45781- * We cannot distingush between fault on source
45782- * and fault on destination. When this happens
45783- * we clear too and hope it will trigger the
45784- * EFAULT again.
45785- */
45786- if (n) {
45787- if (clear_user(buffer + tsz - n,
45788- n))
45789+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45790+ if (!elf_buf)
45791+ return -ENOMEM;
45792+ oldfs = get_fs();
45793+ set_fs(KERNEL_DS);
45794+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45795+ set_fs(oldfs);
45796+ if (copy_to_user(buffer, elf_buf, tsz)) {
45797+ kfree(elf_buf);
45798 return -EFAULT;
45799+ }
45800 }
45801+ set_fs(oldfs);
45802+ kfree(elf_buf);
45803 } else {
45804 if (clear_user(buffer, tsz))
45805 return -EFAULT;
45806@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45807
45808 static int open_kcore(struct inode *inode, struct file *filp)
45809 {
45810+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45811+ return -EPERM;
45812+#endif
45813 if (!capable(CAP_SYS_RAWIO))
45814 return -EPERM;
45815 if (kcore_need_update)
45816diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
45817index 80e4645..53e5fcf 100644
45818--- a/fs/proc/meminfo.c
45819+++ b/fs/proc/meminfo.c
45820@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
45821 vmi.used >> 10,
45822 vmi.largest_chunk >> 10
45823 #ifdef CONFIG_MEMORY_FAILURE
45824- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45825+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45826 #endif
45827 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
45828 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
45829diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
45830index b1822dd..df622cb 100644
45831--- a/fs/proc/nommu.c
45832+++ b/fs/proc/nommu.c
45833@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
45834 if (len < 1)
45835 len = 1;
45836 seq_printf(m, "%*c", len, ' ');
45837- seq_path(m, &file->f_path, "");
45838+ seq_path(m, &file->f_path, "\n\\");
45839 }
45840
45841 seq_putc(m, '\n');
45842diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
45843index f738024..876984a 100644
45844--- a/fs/proc/proc_net.c
45845+++ b/fs/proc/proc_net.c
45846@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
45847 struct task_struct *task;
45848 struct nsproxy *ns;
45849 struct net *net = NULL;
45850+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45851+ const struct cred *cred = current_cred();
45852+#endif
45853+
45854+#ifdef CONFIG_GRKERNSEC_PROC_USER
45855+ if (cred->fsuid)
45856+ return net;
45857+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45858+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45859+ return net;
45860+#endif
45861
45862 rcu_read_lock();
45863 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45864diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
45865index a6b6217..1e0579d 100644
45866--- a/fs/proc/proc_sysctl.c
45867+++ b/fs/proc/proc_sysctl.c
45868@@ -9,11 +9,13 @@
45869 #include <linux/namei.h>
45870 #include "internal.h"
45871
45872+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45873+
45874 static const struct dentry_operations proc_sys_dentry_operations;
45875 static const struct file_operations proc_sys_file_operations;
45876-static const struct inode_operations proc_sys_inode_operations;
45877+const struct inode_operations proc_sys_inode_operations;
45878 static const struct file_operations proc_sys_dir_file_operations;
45879-static const struct inode_operations proc_sys_dir_operations;
45880+const struct inode_operations proc_sys_dir_operations;
45881
45882 void proc_sys_poll_notify(struct ctl_table_poll *poll)
45883 {
45884@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
45885
45886 err = NULL;
45887 d_set_d_op(dentry, &proc_sys_dentry_operations);
45888+
45889+ gr_handle_proc_create(dentry, inode);
45890+
45891 d_add(dentry, inode);
45892
45893+ if (gr_handle_sysctl(p, MAY_EXEC))
45894+ err = ERR_PTR(-ENOENT);
45895+
45896 out:
45897 sysctl_head_finish(head);
45898 return err;
45899@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
45900 if (!table->proc_handler)
45901 goto out;
45902
45903+#ifdef CONFIG_GRKERNSEC
45904+ error = -EPERM;
45905+ if (write && !capable(CAP_SYS_ADMIN))
45906+ goto out;
45907+#endif
45908+
45909 /* careful: calling conventions are nasty here */
45910 res = count;
45911 error = table->proc_handler(table, write, buf, &res, ppos);
45912@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
45913 return -ENOMEM;
45914 } else {
45915 d_set_d_op(child, &proc_sys_dentry_operations);
45916+
45917+ gr_handle_proc_create(child, inode);
45918+
45919 d_add(child, inode);
45920 }
45921 } else {
45922@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
45923 if (*pos < file->f_pos)
45924 continue;
45925
45926+ if (gr_handle_sysctl(table, 0))
45927+ continue;
45928+
45929 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45930 if (res)
45931 return res;
45932@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
45933 if (IS_ERR(head))
45934 return PTR_ERR(head);
45935
45936+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45937+ return -ENOENT;
45938+
45939 generic_fillattr(inode, stat);
45940 if (table)
45941 stat->mode = (stat->mode & S_IFMT) | table->mode;
45942@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
45943 .llseek = generic_file_llseek,
45944 };
45945
45946-static const struct inode_operations proc_sys_inode_operations = {
45947+const struct inode_operations proc_sys_inode_operations = {
45948 .permission = proc_sys_permission,
45949 .setattr = proc_sys_setattr,
45950 .getattr = proc_sys_getattr,
45951 };
45952
45953-static const struct inode_operations proc_sys_dir_operations = {
45954+const struct inode_operations proc_sys_dir_operations = {
45955 .lookup = proc_sys_lookup,
45956 .permission = proc_sys_permission,
45957 .setattr = proc_sys_setattr,
45958diff --git a/fs/proc/root.c b/fs/proc/root.c
45959index 03102d9..4ae347e 100644
45960--- a/fs/proc/root.c
45961+++ b/fs/proc/root.c
45962@@ -121,7 +121,15 @@ void __init proc_root_init(void)
45963 #ifdef CONFIG_PROC_DEVICETREE
45964 proc_device_tree_init();
45965 #endif
45966+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45967+#ifdef CONFIG_GRKERNSEC_PROC_USER
45968+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45969+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45970+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45971+#endif
45972+#else
45973 proc_mkdir("bus", NULL);
45974+#endif
45975 proc_sys_init();
45976 }
45977
45978diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
45979index 7dcd2a2..d1d9cb6 100644
45980--- a/fs/proc/task_mmu.c
45981+++ b/fs/proc/task_mmu.c
45982@@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
45983 "VmExe:\t%8lu kB\n"
45984 "VmLib:\t%8lu kB\n"
45985 "VmPTE:\t%8lu kB\n"
45986- "VmSwap:\t%8lu kB\n",
45987- hiwater_vm << (PAGE_SHIFT-10),
45988+ "VmSwap:\t%8lu kB\n"
45989+
45990+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45991+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45992+#endif
45993+
45994+ ,hiwater_vm << (PAGE_SHIFT-10),
45995 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45996 mm->locked_vm << (PAGE_SHIFT-10),
45997 mm->pinned_vm << (PAGE_SHIFT-10),
45998@@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
45999 data << (PAGE_SHIFT-10),
46000 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46001 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46002- swap << (PAGE_SHIFT-10));
46003+ swap << (PAGE_SHIFT-10)
46004+
46005+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46006+ , mm->context.user_cs_base, mm->context.user_cs_limit
46007+#endif
46008+
46009+ );
46010 }
46011
46012 unsigned long task_vsize(struct mm_struct *mm)
46013@@ -209,6 +220,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46014 return ret;
46015 }
46016
46017+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46018+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46019+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46020+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46021+#endif
46022+
46023 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46024 {
46025 struct mm_struct *mm = vma->vm_mm;
46026@@ -227,13 +244,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46027 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46028 }
46029
46030- /* We don't show the stack guard page in /proc/maps */
46031+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46032+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46033+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46034+#else
46035 start = vma->vm_start;
46036- if (stack_guard_page_start(vma, start))
46037- start += PAGE_SIZE;
46038 end = vma->vm_end;
46039- if (stack_guard_page_end(vma, end))
46040- end -= PAGE_SIZE;
46041+#endif
46042
46043 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46044 start,
46045@@ -242,7 +259,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46046 flags & VM_WRITE ? 'w' : '-',
46047 flags & VM_EXEC ? 'x' : '-',
46048 flags & VM_MAYSHARE ? 's' : 'p',
46049+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46050+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46051+#else
46052 pgoff,
46053+#endif
46054 MAJOR(dev), MINOR(dev), ino, &len);
46055
46056 /*
46057@@ -251,7 +272,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46058 */
46059 if (file) {
46060 pad_len_spaces(m, len);
46061- seq_path(m, &file->f_path, "\n");
46062+ seq_path(m, &file->f_path, "\n\\");
46063 } else {
46064 const char *name = arch_vma_name(vma);
46065 if (!name) {
46066@@ -259,8 +280,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46067 if (vma->vm_start <= mm->brk &&
46068 vma->vm_end >= mm->start_brk) {
46069 name = "[heap]";
46070- } else if (vma->vm_start <= mm->start_stack &&
46071- vma->vm_end >= mm->start_stack) {
46072+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46073+ (vma->vm_start <= mm->start_stack &&
46074+ vma->vm_end >= mm->start_stack)) {
46075 name = "[stack]";
46076 }
46077 } else {
46078@@ -435,11 +457,16 @@ static int show_smap(struct seq_file *m, void *v)
46079 };
46080
46081 memset(&mss, 0, sizeof mss);
46082- mss.vma = vma;
46083- /* mmap_sem is held in m_start */
46084- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46085- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46086-
46087+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46088+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46089+#endif
46090+ mss.vma = vma;
46091+ /* mmap_sem is held in m_start */
46092+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46093+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46094+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46095+ }
46096+#endif
46097 show_map_vma(m, vma);
46098
46099 seq_printf(m,
46100@@ -457,7 +484,11 @@ static int show_smap(struct seq_file *m, void *v)
46101 "KernelPageSize: %8lu kB\n"
46102 "MMUPageSize: %8lu kB\n"
46103 "Locked: %8lu kB\n",
46104+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46105+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46106+#else
46107 (vma->vm_end - vma->vm_start) >> 10,
46108+#endif
46109 mss.resident >> 10,
46110 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46111 mss.shared_clean >> 10,
46112@@ -1036,7 +1067,7 @@ static int show_numa_map(struct seq_file *m, void *v)
46113
46114 if (file) {
46115 seq_printf(m, " file=");
46116- seq_path(m, &file->f_path, "\n\t= ");
46117+ seq_path(m, &file->f_path, "\n\t\\= ");
46118 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46119 seq_printf(m, " heap");
46120 } else if (vma->vm_start <= mm->start_stack &&
46121diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46122index 980de54..2a4db5f 100644
46123--- a/fs/proc/task_nommu.c
46124+++ b/fs/proc/task_nommu.c
46125@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46126 else
46127 bytes += kobjsize(mm);
46128
46129- if (current->fs && current->fs->users > 1)
46130+ if (current->fs && atomic_read(&current->fs->users) > 1)
46131 sbytes += kobjsize(current->fs);
46132 else
46133 bytes += kobjsize(current->fs);
46134@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46135
46136 if (file) {
46137 pad_len_spaces(m, len);
46138- seq_path(m, &file->f_path, "");
46139+ seq_path(m, &file->f_path, "\n\\");
46140 } else if (mm) {
46141 if (vma->vm_start <= mm->start_stack &&
46142 vma->vm_end >= mm->start_stack) {
46143diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46144index d67908b..d13f6a6 100644
46145--- a/fs/quota/netlink.c
46146+++ b/fs/quota/netlink.c
46147@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46148 void quota_send_warning(short type, unsigned int id, dev_t dev,
46149 const char warntype)
46150 {
46151- static atomic_t seq;
46152+ static atomic_unchecked_t seq;
46153 struct sk_buff *skb;
46154 void *msg_head;
46155 int ret;
46156@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46157 "VFS: Not enough memory to send quota warning.\n");
46158 return;
46159 }
46160- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46161+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46162 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46163 if (!msg_head) {
46164 printk(KERN_ERR
46165diff --git a/fs/readdir.c b/fs/readdir.c
46166index 356f715..c918d38 100644
46167--- a/fs/readdir.c
46168+++ b/fs/readdir.c
46169@@ -17,6 +17,7 @@
46170 #include <linux/security.h>
46171 #include <linux/syscalls.h>
46172 #include <linux/unistd.h>
46173+#include <linux/namei.h>
46174
46175 #include <asm/uaccess.h>
46176
46177@@ -67,6 +68,7 @@ struct old_linux_dirent {
46178
46179 struct readdir_callback {
46180 struct old_linux_dirent __user * dirent;
46181+ struct file * file;
46182 int result;
46183 };
46184
46185@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46186 buf->result = -EOVERFLOW;
46187 return -EOVERFLOW;
46188 }
46189+
46190+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46191+ return 0;
46192+
46193 buf->result++;
46194 dirent = buf->dirent;
46195 if (!access_ok(VERIFY_WRITE, dirent,
46196@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46197
46198 buf.result = 0;
46199 buf.dirent = dirent;
46200+ buf.file = file;
46201
46202 error = vfs_readdir(file, fillonedir, &buf);
46203 if (buf.result)
46204@@ -142,6 +149,7 @@ struct linux_dirent {
46205 struct getdents_callback {
46206 struct linux_dirent __user * current_dir;
46207 struct linux_dirent __user * previous;
46208+ struct file * file;
46209 int count;
46210 int error;
46211 };
46212@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46213 buf->error = -EOVERFLOW;
46214 return -EOVERFLOW;
46215 }
46216+
46217+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46218+ return 0;
46219+
46220 dirent = buf->previous;
46221 if (dirent) {
46222 if (__put_user(offset, &dirent->d_off))
46223@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46224 buf.previous = NULL;
46225 buf.count = count;
46226 buf.error = 0;
46227+ buf.file = file;
46228
46229 error = vfs_readdir(file, filldir, &buf);
46230 if (error >= 0)
46231@@ -229,6 +242,7 @@ out:
46232 struct getdents_callback64 {
46233 struct linux_dirent64 __user * current_dir;
46234 struct linux_dirent64 __user * previous;
46235+ struct file *file;
46236 int count;
46237 int error;
46238 };
46239@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46240 buf->error = -EINVAL; /* only used if we fail.. */
46241 if (reclen > buf->count)
46242 return -EINVAL;
46243+
46244+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46245+ return 0;
46246+
46247 dirent = buf->previous;
46248 if (dirent) {
46249 if (__put_user(offset, &dirent->d_off))
46250@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46251
46252 buf.current_dir = dirent;
46253 buf.previous = NULL;
46254+ buf.file = file;
46255 buf.count = count;
46256 buf.error = 0;
46257
46258@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46259 error = buf.error;
46260 lastdirent = buf.previous;
46261 if (lastdirent) {
46262- typeof(lastdirent->d_off) d_off = file->f_pos;
46263+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46264 if (__put_user(d_off, &lastdirent->d_off))
46265 error = -EFAULT;
46266 else
46267diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46268index 60c0804..d814f98 100644
46269--- a/fs/reiserfs/do_balan.c
46270+++ b/fs/reiserfs/do_balan.c
46271@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46272 return;
46273 }
46274
46275- atomic_inc(&(fs_generation(tb->tb_sb)));
46276+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46277 do_balance_starts(tb);
46278
46279 /* balance leaf returns 0 except if combining L R and S into
46280diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46281index 7a99811..a7c96c4 100644
46282--- a/fs/reiserfs/procfs.c
46283+++ b/fs/reiserfs/procfs.c
46284@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46285 "SMALL_TAILS " : "NO_TAILS ",
46286 replay_only(sb) ? "REPLAY_ONLY " : "",
46287 convert_reiserfs(sb) ? "CONV " : "",
46288- atomic_read(&r->s_generation_counter),
46289+ atomic_read_unchecked(&r->s_generation_counter),
46290 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46291 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46292 SF(s_good_search_by_key_reada), SF(s_bmaps),
46293diff --git a/fs/select.c b/fs/select.c
46294index d33418f..2a5345e 100644
46295--- a/fs/select.c
46296+++ b/fs/select.c
46297@@ -20,6 +20,7 @@
46298 #include <linux/module.h>
46299 #include <linux/slab.h>
46300 #include <linux/poll.h>
46301+#include <linux/security.h>
46302 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46303 #include <linux/file.h>
46304 #include <linux/fdtable.h>
46305@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46306 struct poll_list *walk = head;
46307 unsigned long todo = nfds;
46308
46309+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46310 if (nfds > rlimit(RLIMIT_NOFILE))
46311 return -EINVAL;
46312
46313diff --git a/fs/seq_file.c b/fs/seq_file.c
46314index dba43c3..a99fb63 100644
46315--- a/fs/seq_file.c
46316+++ b/fs/seq_file.c
46317@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46318 return 0;
46319 }
46320 if (!m->buf) {
46321- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46322+ m->size = PAGE_SIZE;
46323+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46324 if (!m->buf)
46325 return -ENOMEM;
46326 }
46327@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46328 Eoverflow:
46329 m->op->stop(m, p);
46330 kfree(m->buf);
46331- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46332+ m->size <<= 1;
46333+ m->buf = kmalloc(m->size, GFP_KERNEL);
46334 return !m->buf ? -ENOMEM : -EAGAIN;
46335 }
46336
46337@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46338 m->version = file->f_version;
46339 /* grab buffer if we didn't have one */
46340 if (!m->buf) {
46341- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46342+ m->size = PAGE_SIZE;
46343+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46344 if (!m->buf)
46345 goto Enomem;
46346 }
46347@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46348 goto Fill;
46349 m->op->stop(m, p);
46350 kfree(m->buf);
46351- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46352+ m->size <<= 1;
46353+ m->buf = kmalloc(m->size, GFP_KERNEL);
46354 if (!m->buf)
46355 goto Enomem;
46356 m->count = 0;
46357@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
46358 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46359 void *data)
46360 {
46361- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46362+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46363 int res = -ENOMEM;
46364
46365 if (op) {
46366diff --git a/fs/splice.c b/fs/splice.c
46367index fa2defa..8601650 100644
46368--- a/fs/splice.c
46369+++ b/fs/splice.c
46370@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46371 pipe_lock(pipe);
46372
46373 for (;;) {
46374- if (!pipe->readers) {
46375+ if (!atomic_read(&pipe->readers)) {
46376 send_sig(SIGPIPE, current, 0);
46377 if (!ret)
46378 ret = -EPIPE;
46379@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46380 do_wakeup = 0;
46381 }
46382
46383- pipe->waiting_writers++;
46384+ atomic_inc(&pipe->waiting_writers);
46385 pipe_wait(pipe);
46386- pipe->waiting_writers--;
46387+ atomic_dec(&pipe->waiting_writers);
46388 }
46389
46390 pipe_unlock(pipe);
46391@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46392 old_fs = get_fs();
46393 set_fs(get_ds());
46394 /* The cast to a user pointer is valid due to the set_fs() */
46395- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46396+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46397 set_fs(old_fs);
46398
46399 return res;
46400@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46401 old_fs = get_fs();
46402 set_fs(get_ds());
46403 /* The cast to a user pointer is valid due to the set_fs() */
46404- res = vfs_write(file, (const char __user *)buf, count, &pos);
46405+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46406 set_fs(old_fs);
46407
46408 return res;
46409@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46410 goto err;
46411
46412 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46413- vec[i].iov_base = (void __user *) page_address(page);
46414+ vec[i].iov_base = (void __force_user *) page_address(page);
46415 vec[i].iov_len = this_len;
46416 spd.pages[i] = page;
46417 spd.nr_pages++;
46418@@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46419 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46420 {
46421 while (!pipe->nrbufs) {
46422- if (!pipe->writers)
46423+ if (!atomic_read(&pipe->writers))
46424 return 0;
46425
46426- if (!pipe->waiting_writers && sd->num_spliced)
46427+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46428 return 0;
46429
46430 if (sd->flags & SPLICE_F_NONBLOCK)
46431@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46432 * out of the pipe right after the splice_to_pipe(). So set
46433 * PIPE_READERS appropriately.
46434 */
46435- pipe->readers = 1;
46436+ atomic_set(&pipe->readers, 1);
46437
46438 current->splice_pipe = pipe;
46439 }
46440@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46441 ret = -ERESTARTSYS;
46442 break;
46443 }
46444- if (!pipe->writers)
46445+ if (!atomic_read(&pipe->writers))
46446 break;
46447- if (!pipe->waiting_writers) {
46448+ if (!atomic_read(&pipe->waiting_writers)) {
46449 if (flags & SPLICE_F_NONBLOCK) {
46450 ret = -EAGAIN;
46451 break;
46452@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46453 pipe_lock(pipe);
46454
46455 while (pipe->nrbufs >= pipe->buffers) {
46456- if (!pipe->readers) {
46457+ if (!atomic_read(&pipe->readers)) {
46458 send_sig(SIGPIPE, current, 0);
46459 ret = -EPIPE;
46460 break;
46461@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46462 ret = -ERESTARTSYS;
46463 break;
46464 }
46465- pipe->waiting_writers++;
46466+ atomic_inc(&pipe->waiting_writers);
46467 pipe_wait(pipe);
46468- pipe->waiting_writers--;
46469+ atomic_dec(&pipe->waiting_writers);
46470 }
46471
46472 pipe_unlock(pipe);
46473@@ -1819,14 +1819,14 @@ retry:
46474 pipe_double_lock(ipipe, opipe);
46475
46476 do {
46477- if (!opipe->readers) {
46478+ if (!atomic_read(&opipe->readers)) {
46479 send_sig(SIGPIPE, current, 0);
46480 if (!ret)
46481 ret = -EPIPE;
46482 break;
46483 }
46484
46485- if (!ipipe->nrbufs && !ipipe->writers)
46486+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46487 break;
46488
46489 /*
46490@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46491 pipe_double_lock(ipipe, opipe);
46492
46493 do {
46494- if (!opipe->readers) {
46495+ if (!atomic_read(&opipe->readers)) {
46496 send_sig(SIGPIPE, current, 0);
46497 if (!ret)
46498 ret = -EPIPE;
46499@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46500 * return EAGAIN if we have the potential of some data in the
46501 * future, otherwise just return 0
46502 */
46503- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46504+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46505 ret = -EAGAIN;
46506
46507 pipe_unlock(ipipe);
46508diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46509index d4e6080b..0e58b99 100644
46510--- a/fs/sysfs/file.c
46511+++ b/fs/sysfs/file.c
46512@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46513
46514 struct sysfs_open_dirent {
46515 atomic_t refcnt;
46516- atomic_t event;
46517+ atomic_unchecked_t event;
46518 wait_queue_head_t poll;
46519 struct list_head buffers; /* goes through sysfs_buffer.list */
46520 };
46521@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46522 if (!sysfs_get_active(attr_sd))
46523 return -ENODEV;
46524
46525- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46526+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46527 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46528
46529 sysfs_put_active(attr_sd);
46530@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46531 return -ENOMEM;
46532
46533 atomic_set(&new_od->refcnt, 0);
46534- atomic_set(&new_od->event, 1);
46535+ atomic_set_unchecked(&new_od->event, 1);
46536 init_waitqueue_head(&new_od->poll);
46537 INIT_LIST_HEAD(&new_od->buffers);
46538 goto retry;
46539@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46540
46541 sysfs_put_active(attr_sd);
46542
46543- if (buffer->event != atomic_read(&od->event))
46544+ if (buffer->event != atomic_read_unchecked(&od->event))
46545 goto trigger;
46546
46547 return DEFAULT_POLLMASK;
46548@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46549
46550 od = sd->s_attr.open;
46551 if (od) {
46552- atomic_inc(&od->event);
46553+ atomic_inc_unchecked(&od->event);
46554 wake_up_interruptible(&od->poll);
46555 }
46556
46557diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
46558index e34f0d9..740ea7b 100644
46559--- a/fs/sysfs/mount.c
46560+++ b/fs/sysfs/mount.c
46561@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46562 .s_name = "",
46563 .s_count = ATOMIC_INIT(1),
46564 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46565+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46566+ .s_mode = S_IFDIR | S_IRWXU,
46567+#else
46568 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46569+#endif
46570 .s_ino = 1,
46571 };
46572
46573diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46574index a7ac78f..02158e1 100644
46575--- a/fs/sysfs/symlink.c
46576+++ b/fs/sysfs/symlink.c
46577@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46578
46579 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46580 {
46581- char *page = nd_get_link(nd);
46582+ const char *page = nd_get_link(nd);
46583 if (!IS_ERR(page))
46584 free_page((unsigned long)page);
46585 }
46586diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46587index c175b4d..8f36a16 100644
46588--- a/fs/udf/misc.c
46589+++ b/fs/udf/misc.c
46590@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
46591
46592 u8 udf_tag_checksum(const struct tag *t)
46593 {
46594- u8 *data = (u8 *)t;
46595+ const u8 *data = (const u8 *)t;
46596 u8 checksum = 0;
46597 int i;
46598 for (i = 0; i < sizeof(struct tag); ++i)
46599diff --git a/fs/utimes.c b/fs/utimes.c
46600index ba653f3..06ea4b1 100644
46601--- a/fs/utimes.c
46602+++ b/fs/utimes.c
46603@@ -1,6 +1,7 @@
46604 #include <linux/compiler.h>
46605 #include <linux/file.h>
46606 #include <linux/fs.h>
46607+#include <linux/security.h>
46608 #include <linux/linkage.h>
46609 #include <linux/mount.h>
46610 #include <linux/namei.h>
46611@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
46612 goto mnt_drop_write_and_out;
46613 }
46614 }
46615+
46616+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46617+ error = -EACCES;
46618+ goto mnt_drop_write_and_out;
46619+ }
46620+
46621 mutex_lock(&inode->i_mutex);
46622 error = notify_change(path->dentry, &newattrs);
46623 mutex_unlock(&inode->i_mutex);
46624diff --git a/fs/xattr.c b/fs/xattr.c
46625index 67583de..c5aad14 100644
46626--- a/fs/xattr.c
46627+++ b/fs/xattr.c
46628@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46629 * Extended attribute SET operations
46630 */
46631 static long
46632-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46633+setxattr(struct path *path, const char __user *name, const void __user *value,
46634 size_t size, int flags)
46635 {
46636 int error;
46637@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
46638 return PTR_ERR(kvalue);
46639 }
46640
46641- error = vfs_setxattr(d, kname, kvalue, size, flags);
46642+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46643+ error = -EACCES;
46644+ goto out;
46645+ }
46646+
46647+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46648+out:
46649 kfree(kvalue);
46650 return error;
46651 }
46652@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
46653 return error;
46654 error = mnt_want_write(path.mnt);
46655 if (!error) {
46656- error = setxattr(path.dentry, name, value, size, flags);
46657+ error = setxattr(&path, name, value, size, flags);
46658 mnt_drop_write(path.mnt);
46659 }
46660 path_put(&path);
46661@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
46662 return error;
46663 error = mnt_want_write(path.mnt);
46664 if (!error) {
46665- error = setxattr(path.dentry, name, value, size, flags);
46666+ error = setxattr(&path, name, value, size, flags);
46667 mnt_drop_write(path.mnt);
46668 }
46669 path_put(&path);
46670@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
46671 const void __user *,value, size_t, size, int, flags)
46672 {
46673 struct file *f;
46674- struct dentry *dentry;
46675 int error = -EBADF;
46676
46677 f = fget(fd);
46678 if (!f)
46679 return error;
46680- dentry = f->f_path.dentry;
46681- audit_inode(NULL, dentry);
46682+ audit_inode(NULL, f->f_path.dentry);
46683 error = mnt_want_write_file(f);
46684 if (!error) {
46685- error = setxattr(dentry, name, value, size, flags);
46686+ error = setxattr(&f->f_path, name, value, size, flags);
46687 mnt_drop_write(f->f_path.mnt);
46688 }
46689 fput(f);
46690diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
46691index 8d5a506..7f62712 100644
46692--- a/fs/xattr_acl.c
46693+++ b/fs/xattr_acl.c
46694@@ -17,8 +17,8 @@
46695 struct posix_acl *
46696 posix_acl_from_xattr(const void *value, size_t size)
46697 {
46698- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46699- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46700+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46701+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46702 int count;
46703 struct posix_acl *acl;
46704 struct posix_acl_entry *acl_e;
46705diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
46706index d0ab788..827999b 100644
46707--- a/fs/xfs/xfs_bmap.c
46708+++ b/fs/xfs/xfs_bmap.c
46709@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
46710 int nmap,
46711 int ret_nmap);
46712 #else
46713-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46714+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46715 #endif /* DEBUG */
46716
46717 STATIC int
46718diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
46719index 79d05e8..e3e5861 100644
46720--- a/fs/xfs/xfs_dir2_sf.c
46721+++ b/fs/xfs/xfs_dir2_sf.c
46722@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
46723 }
46724
46725 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
46726- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46727+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46728+ char name[sfep->namelen];
46729+ memcpy(name, sfep->name, sfep->namelen);
46730+ if (filldir(dirent, name, sfep->namelen,
46731+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46732+ *offset = off & 0x7fffffff;
46733+ return 0;
46734+ }
46735+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46736 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46737 *offset = off & 0x7fffffff;
46738 return 0;
46739diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
46740index d99a905..9f88202 100644
46741--- a/fs/xfs/xfs_ioctl.c
46742+++ b/fs/xfs/xfs_ioctl.c
46743@@ -128,7 +128,7 @@ xfs_find_handle(
46744 }
46745
46746 error = -EFAULT;
46747- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46748+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46749 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46750 goto out_put;
46751
46752diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
46753index 23ce927..e274cc1 100644
46754--- a/fs/xfs/xfs_iops.c
46755+++ b/fs/xfs/xfs_iops.c
46756@@ -447,7 +447,7 @@ xfs_vn_put_link(
46757 struct nameidata *nd,
46758 void *p)
46759 {
46760- char *s = nd_get_link(nd);
46761+ const char *s = nd_get_link(nd);
46762
46763 if (!IS_ERR(s))
46764 kfree(s);
46765diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
46766new file mode 100644
46767index 0000000..fbe6950d9
46768--- /dev/null
46769+++ b/grsecurity/Kconfig
46770@@ -0,0 +1,1067 @@
46771+#
46772+# grsecurity configuration
46773+#
46774+
46775+menu "Grsecurity"
46776+
46777+config GRKERNSEC
46778+ bool "Grsecurity"
46779+ select CRYPTO
46780+ select CRYPTO_SHA256
46781+ help
46782+ If you say Y here, you will be able to configure many features
46783+ that will enhance the security of your system. It is highly
46784+ recommended that you say Y here and read through the help
46785+ for each option so that you fully understand the features and
46786+ can evaluate their usefulness for your machine.
46787+
46788+choice
46789+ prompt "Security Level"
46790+ depends on GRKERNSEC
46791+ default GRKERNSEC_CUSTOM
46792+
46793+config GRKERNSEC_LOW
46794+ bool "Low"
46795+ select GRKERNSEC_LINK
46796+ select GRKERNSEC_FIFO
46797+ select GRKERNSEC_RANDNET
46798+ select GRKERNSEC_DMESG
46799+ select GRKERNSEC_CHROOT
46800+ select GRKERNSEC_CHROOT_CHDIR
46801+
46802+ help
46803+ If you choose this option, several of the grsecurity options will
46804+ be enabled that will give you greater protection against a number
46805+ of attacks, while assuring that none of your software will have any
46806+ conflicts with the additional security measures. If you run a lot
46807+ of unusual software, or you are having problems with the higher
46808+ security levels, you should say Y here. With this option, the
46809+ following features are enabled:
46810+
46811+ - Linking restrictions
46812+ - FIFO restrictions
46813+ - Restricted dmesg
46814+ - Enforced chdir("/") on chroot
46815+ - Runtime module disabling
46816+
46817+config GRKERNSEC_MEDIUM
46818+ bool "Medium"
46819+ select PAX
46820+ select PAX_EI_PAX
46821+ select PAX_PT_PAX_FLAGS
46822+ select PAX_HAVE_ACL_FLAGS
46823+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
46824+ select GRKERNSEC_CHROOT
46825+ select GRKERNSEC_CHROOT_SYSCTL
46826+ select GRKERNSEC_LINK
46827+ select GRKERNSEC_FIFO
46828+ select GRKERNSEC_DMESG
46829+ select GRKERNSEC_RANDNET
46830+ select GRKERNSEC_FORKFAIL
46831+ select GRKERNSEC_TIME
46832+ select GRKERNSEC_SIGNAL
46833+ select GRKERNSEC_CHROOT
46834+ select GRKERNSEC_CHROOT_UNIX
46835+ select GRKERNSEC_CHROOT_MOUNT
46836+ select GRKERNSEC_CHROOT_PIVOT
46837+ select GRKERNSEC_CHROOT_DOUBLE
46838+ select GRKERNSEC_CHROOT_CHDIR
46839+ select GRKERNSEC_CHROOT_MKNOD
46840+ select GRKERNSEC_PROC
46841+ select GRKERNSEC_PROC_USERGROUP
46842+ select PAX_RANDUSTACK
46843+ select PAX_ASLR
46844+ select PAX_RANDMMAP
46845+ select PAX_REFCOUNT if (X86 || SPARC64)
46846+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
46847+
46848+ help
46849+ If you say Y here, several features in addition to those included
46850+ in the low additional security level will be enabled. These
46851+ features provide even more security to your system, though in rare
46852+ cases they may be incompatible with very old or poorly written
46853+ software. If you enable this option, make sure that your auth
46854+ service (identd) is running as gid 1001. With this option,
46855+ the following features (in addition to those provided in the
46856+ low additional security level) will be enabled:
46857+
46858+ - Failed fork logging
46859+ - Time change logging
46860+ - Signal logging
46861+ - Deny mounts in chroot
46862+ - Deny double chrooting
46863+ - Deny sysctl writes in chroot
46864+ - Deny mknod in chroot
46865+ - Deny access to abstract AF_UNIX sockets out of chroot
46866+ - Deny pivot_root in chroot
46867+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
46868+ - /proc restrictions with special GID set to 10 (usually wheel)
46869+ - Address Space Layout Randomization (ASLR)
46870+ - Prevent exploitation of most refcount overflows
46871+ - Bounds checking of copying between the kernel and userland
46872+
46873+config GRKERNSEC_HIGH
46874+ bool "High"
46875+ select GRKERNSEC_LINK
46876+ select GRKERNSEC_FIFO
46877+ select GRKERNSEC_DMESG
46878+ select GRKERNSEC_FORKFAIL
46879+ select GRKERNSEC_TIME
46880+ select GRKERNSEC_SIGNAL
46881+ select GRKERNSEC_CHROOT
46882+ select GRKERNSEC_CHROOT_SHMAT
46883+ select GRKERNSEC_CHROOT_UNIX
46884+ select GRKERNSEC_CHROOT_MOUNT
46885+ select GRKERNSEC_CHROOT_FCHDIR
46886+ select GRKERNSEC_CHROOT_PIVOT
46887+ select GRKERNSEC_CHROOT_DOUBLE
46888+ select GRKERNSEC_CHROOT_CHDIR
46889+ select GRKERNSEC_CHROOT_MKNOD
46890+ select GRKERNSEC_CHROOT_CAPS
46891+ select GRKERNSEC_CHROOT_SYSCTL
46892+ select GRKERNSEC_CHROOT_FINDTASK
46893+ select GRKERNSEC_SYSFS_RESTRICT
46894+ select GRKERNSEC_PROC
46895+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
46896+ select GRKERNSEC_HIDESYM
46897+ select GRKERNSEC_BRUTE
46898+ select GRKERNSEC_PROC_USERGROUP
46899+ select GRKERNSEC_KMEM
46900+ select GRKERNSEC_RESLOG
46901+ select GRKERNSEC_RANDNET
46902+ select GRKERNSEC_PROC_ADD
46903+ select GRKERNSEC_CHROOT_CHMOD
46904+ select GRKERNSEC_CHROOT_NICE
46905+ select GRKERNSEC_SETXID
46906+ select GRKERNSEC_AUDIT_MOUNT
46907+ select GRKERNSEC_MODHARDEN if (MODULES)
46908+ select GRKERNSEC_HARDEN_PTRACE
46909+ select GRKERNSEC_PTRACE_READEXEC
46910+ select GRKERNSEC_VM86 if (X86_32)
46911+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
46912+ select PAX
46913+ select PAX_RANDUSTACK
46914+ select PAX_ASLR
46915+ select PAX_RANDMMAP
46916+ select PAX_NOEXEC
46917+ select PAX_MPROTECT
46918+ select PAX_EI_PAX
46919+ select PAX_PT_PAX_FLAGS
46920+ select PAX_HAVE_ACL_FLAGS
46921+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
46922+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
46923+ select PAX_RANDKSTACK if (X86_TSC && X86)
46924+ select PAX_SEGMEXEC if (X86_32)
46925+ select PAX_PAGEEXEC
46926+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
46927+ select PAX_EMUTRAMP if (PARISC)
46928+ select PAX_EMUSIGRT if (PARISC)
46929+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
46930+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
46931+ select PAX_REFCOUNT if (X86 || SPARC64)
46932+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
46933+ help
46934+ If you say Y here, many of the features of grsecurity will be
46935+ enabled, which will protect you against many kinds of attacks
46936+ against your system. The heightened security comes at a cost
46937+ of an increased chance of incompatibilities with rare software
46938+ on your machine. Since this security level enables PaX, you should
46939+ view <http://pax.grsecurity.net> and read about the PaX
46940+ project. While you are there, download chpax and run it on
46941+ binaries that cause problems with PaX. Also remember that
46942+ since the /proc restrictions are enabled, you must run your
46943+ identd as gid 1001. This security level enables the following
46944+ features in addition to those listed in the low and medium
46945+ security levels:
46946+
46947+ - Additional /proc restrictions
46948+ - Chmod restrictions in chroot
46949+ - No signals, ptrace, or viewing of processes outside of chroot
46950+ - Capability restrictions in chroot
46951+ - Deny fchdir out of chroot
46952+ - Priority restrictions in chroot
46953+ - Segmentation-based implementation of PaX
46954+ - Mprotect restrictions
46955+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
46956+ - Kernel stack randomization
46957+ - Mount/unmount/remount logging
46958+ - Kernel symbol hiding
46959+ - Hardening of module auto-loading
46960+ - Ptrace restrictions
46961+ - Restricted vm86 mode
46962+ - Restricted sysfs/debugfs
46963+ - Active kernel exploit response
46964+
46965+config GRKERNSEC_CUSTOM
46966+ bool "Custom"
46967+ help
46968+ If you say Y here, you will be able to configure every grsecurity
46969+ option, which allows you to enable many more features that aren't
46970+ covered in the basic security levels. These additional features
46971+ include TPE, socket restrictions, and the sysctl system for
46972+ grsecurity. It is advised that you read through the help for
46973+ each option to determine its usefulness in your situation.
46974+
46975+endchoice
46976+
46977+menu "Address Space Protection"
46978+depends on GRKERNSEC
46979+
46980+config GRKERNSEC_KMEM
46981+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
46982+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
46983+ help
46984+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
46985+ be written to or read from to modify or leak the contents of the running
46986+ kernel. /dev/port will also not be allowed to be opened. If you have module
46987+ support disabled, enabling this will close up four ways that are
46988+ currently used to insert malicious code into the running kernel.
46989+ Even with all these features enabled, we still highly recommend that
46990+ you use the RBAC system, as it is still possible for an attacker to
46991+ modify the running kernel through privileged I/O granted by ioperm/iopl.
46992+ If you are not using XFree86, you may be able to stop this additional
46993+ case by enabling the 'Disable privileged I/O' option. Though nothing
46994+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
46995+ but only to video memory, which is the only writing we allow in this
46996+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
46997+ not be allowed to mprotect it with PROT_WRITE later.
46998+ It is highly recommended that you say Y here if you meet all the
46999+ conditions above.
47000+
47001+config GRKERNSEC_VM86
47002+ bool "Restrict VM86 mode"
47003+ depends on X86_32
47004+
47005+ help
47006+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47007+ make use of a special execution mode on 32bit x86 processors called
47008+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47009+ video cards and will still work with this option enabled. The purpose
47010+ of the option is to prevent exploitation of emulation errors in
47011+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
47012+ Nearly all users should be able to enable this option.
47013+
47014+config GRKERNSEC_IO
47015+ bool "Disable privileged I/O"
47016+ depends on X86
47017+ select RTC_CLASS
47018+ select RTC_INTF_DEV
47019+ select RTC_DRV_CMOS
47020+
47021+ help
47022+ If you say Y here, all ioperm and iopl calls will return an error.
47023+ Ioperm and iopl can be used to modify the running kernel.
47024+ Unfortunately, some programs need this access to operate properly,
47025+ the most notable of which are XFree86 and hwclock. hwclock can be
47026+ remedied by having RTC support in the kernel, so real-time
47027+ clock support is enabled if this option is enabled, to ensure
47028+ that hwclock operates correctly. XFree86 still will not
47029+ operate correctly with this option enabled, so DO NOT CHOOSE Y
47030+ IF YOU USE XFree86. If you use XFree86 and you still want to
47031+ protect your kernel against modification, use the RBAC system.
47032+
47033+config GRKERNSEC_PROC_MEMMAP
47034+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
47035+ default y if (PAX_NOEXEC || PAX_ASLR)
47036+ depends on PAX_NOEXEC || PAX_ASLR
47037+ help
47038+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47039+ give no information about the addresses of its mappings if
47040+ PaX features that rely on random addresses are enabled on the task.
47041+ If you use PaX it is greatly recommended that you say Y here as it
47042+ closes up a hole that makes the full ASLR useless for suid
47043+ binaries.
47044+
47045+config GRKERNSEC_BRUTE
47046+ bool "Deter exploit bruteforcing"
47047+ help
47048+ If you say Y here, attempts to bruteforce exploits against forking
47049+ daemons such as apache or sshd, as well as against suid/sgid binaries
47050+ will be deterred. When a child of a forking daemon is killed by PaX
47051+ or crashes due to an illegal instruction or other suspicious signal,
47052+ the parent process will be delayed 30 seconds upon every subsequent
47053+ fork until the administrator is able to assess the situation and
47054+ restart the daemon.
47055+ In the suid/sgid case, the attempt is logged, the user has all their
47056+ processes terminated, and they are prevented from executing any further
47057+ processes for 15 minutes.
47058+ It is recommended that you also enable signal logging in the auditing
47059+ section so that logs are generated when a process triggers a suspicious
47060+ signal.
47061+ If the sysctl option is enabled, a sysctl option with name
47062+ "deter_bruteforce" is created.
47063+
47064+
47065+config GRKERNSEC_MODHARDEN
47066+ bool "Harden module auto-loading"
47067+ depends on MODULES
47068+ help
47069+ If you say Y here, module auto-loading in response to use of some
47070+ feature implemented by an unloaded module will be restricted to
47071+ root users. Enabling this option helps defend against attacks
47072+ by unprivileged users who abuse the auto-loading behavior to
47073+ cause a vulnerable module to load that is then exploited.
47074+
47075+ If this option prevents a legitimate use of auto-loading for a
47076+ non-root user, the administrator can execute modprobe manually
47077+ with the exact name of the module mentioned in the alert log.
47078+ Alternatively, the administrator can add the module to the list
47079+ of modules loaded at boot by modifying init scripts.
47080+
47081+ Modification of init scripts will most likely be needed on
47082+ Ubuntu servers with encrypted home directory support enabled,
47083+ as the first non-root user logging in will cause the ecb(aes),
47084+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47085+
47086+config GRKERNSEC_HIDESYM
47087+ bool "Hide kernel symbols"
47088+ help
47089+ If you say Y here, getting information on loaded modules, and
47090+ displaying all kernel symbols through a syscall will be restricted
47091+ to users with CAP_SYS_MODULE. For software compatibility reasons,
47092+ /proc/kallsyms will be restricted to the root user. The RBAC
47093+ system can hide that entry even from root.
47094+
47095+ This option also prevents leaking of kernel addresses through
47096+ several /proc entries.
47097+
47098+ Note that this option is only effective provided the following
47099+ conditions are met:
47100+ 1) The kernel using grsecurity is not precompiled by some distribution
47101+ 2) You have also enabled GRKERNSEC_DMESG
47102+ 3) You are using the RBAC system and hiding other files such as your
47103+ kernel image and System.map. Alternatively, enabling this option
47104+ causes the permissions on /boot, /lib/modules, and the kernel
47105+ source directory to change at compile time to prevent
47106+ reading by non-root users.
47107+ If the above conditions are met, this option will aid in providing a
47108+ useful protection against local kernel exploitation of overflows
47109+ and arbitrary read/write vulnerabilities.
47110+
47111+config GRKERNSEC_KERN_LOCKOUT
47112+ bool "Active kernel exploit response"
47113+ depends on X86 || ARM || PPC || SPARC
47114+ help
47115+ If you say Y here, when a PaX alert is triggered due to suspicious
47116+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47117+ or an OOPs occurs due to bad memory accesses, instead of just
47118+ terminating the offending process (and potentially allowing
47119+ a subsequent exploit from the same user), we will take one of two
47120+ actions:
47121+ If the user was root, we will panic the system
47122+ If the user was non-root, we will log the attempt, terminate
47123+ all processes owned by the user, then prevent them from creating
47124+ any new processes until the system is restarted
47125+ This deters repeated kernel exploitation/bruteforcing attempts
47126+ and is useful for later forensics.
47127+
47128+endmenu
47129+menu "Role Based Access Control Options"
47130+depends on GRKERNSEC
47131+
47132+config GRKERNSEC_RBAC_DEBUG
47133+ bool
47134+
47135+config GRKERNSEC_NO_RBAC
47136+ bool "Disable RBAC system"
47137+ help
47138+ If you say Y here, the /dev/grsec device will be removed from the kernel,
47139+ preventing the RBAC system from being enabled. You should only say Y
47140+ here if you have no intention of using the RBAC system, so as to prevent
47141+ an attacker with root access from misusing the RBAC system to hide files
47142+ and processes when loadable module support and /dev/[k]mem have been
47143+ locked down.
47144+
47145+config GRKERNSEC_ACL_HIDEKERN
47146+ bool "Hide kernel processes"
47147+ help
47148+ If you say Y here, all kernel threads will be hidden to all
47149+ processes but those whose subject has the "view hidden processes"
47150+ flag.
47151+
47152+config GRKERNSEC_ACL_MAXTRIES
47153+ int "Maximum tries before password lockout"
47154+ default 3
47155+ help
47156+ This option enforces the maximum number of times a user can attempt
47157+ to authorize themselves with the grsecurity RBAC system before being
47158+ denied the ability to attempt authorization again for a specified time.
47159+ The lower the number, the harder it will be to brute-force a password.
47160+
47161+config GRKERNSEC_ACL_TIMEOUT
47162+ int "Time to wait after max password tries, in seconds"
47163+ default 30
47164+ help
47165+ This option specifies the time the user must wait after attempting to
47166+ authorize to the RBAC system with the maximum number of invalid
47167+ passwords. The higher the number, the harder it will be to brute-force
47168+ a password.
47169+
47170+endmenu
47171+menu "Filesystem Protections"
47172+depends on GRKERNSEC
47173+
47174+config GRKERNSEC_PROC
47175+ bool "Proc restrictions"
47176+ help
47177+ If you say Y here, the permissions of the /proc filesystem
47178+ will be altered to enhance system security and privacy. You MUST
47179+ choose either a user only restriction or a user and group restriction.
47180+ Depending upon the option you choose, you can either restrict users to
47181+ see only the processes they themselves run, or choose a group that can
47182+ view all processes and files normally restricted to root if you choose
47183+ the "restrict to user only" option. NOTE: If you're running identd as
47184+ a non-root user, you will have to run it as the group you specify here.
47185+
47186+config GRKERNSEC_PROC_USER
47187+ bool "Restrict /proc to user only"
47188+ depends on GRKERNSEC_PROC
47189+ help
47190+ If you say Y here, non-root users will only be able to view their own
47191+ processes, and restricts them from viewing network-related information,
47192+ and viewing kernel symbol and module information.
47193+
47194+config GRKERNSEC_PROC_USERGROUP
47195+ bool "Allow special group"
47196+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47197+ help
47198+ If you say Y here, you will be able to select a group that will be
47199+ able to view all processes and network-related information. If you've
47200+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47201+ remain hidden. This option is useful if you want to run identd as
47202+ a non-root user.
47203+
47204+config GRKERNSEC_PROC_GID
47205+ int "GID for special group"
47206+ depends on GRKERNSEC_PROC_USERGROUP
47207+ default 1001
47208+
47209+config GRKERNSEC_PROC_ADD
47210+ bool "Additional restrictions"
47211+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47212+ help
47213+ If you say Y here, additional restrictions will be placed on
47214+ /proc that keep normal users from viewing device information and
47215+ slabinfo information that could be useful for exploits.
47216+
47217+config GRKERNSEC_LINK
47218+ bool "Linking restrictions"
47219+ help
47220+ If you say Y here, /tmp race exploits will be prevented, since users
47221+ will no longer be able to follow symlinks owned by other users in
47222+ world-writable +t directories (e.g. /tmp), unless the owner of the
47223+	  symlink is the owner of the directory. Users will also not be
47224+ able to hardlink to files they do not own. If the sysctl option is
47225+ enabled, a sysctl option with name "linking_restrictions" is created.
47226+
47227+config GRKERNSEC_FIFO
47228+ bool "FIFO restrictions"
47229+ help
47230+ If you say Y here, users will not be able to write to FIFOs they don't
47231+ own in world-writable +t directories (e.g. /tmp), unless the owner of
47232+ the FIFO is the same owner of the directory it's held in. If the sysctl
47233+ option is enabled, a sysctl option with name "fifo_restrictions" is
47234+ created.
47235+
47236+config GRKERNSEC_SYSFS_RESTRICT
47237+ bool "Sysfs/debugfs restriction"
47238+ depends on SYSFS
47239+ help
47240+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47241+ any filesystem normally mounted under it (e.g. debugfs) will only
47242+ be accessible by root. These filesystems generally provide access
47243+ to hardware and debug information that isn't appropriate for unprivileged
47244+ users of the system. Sysfs and debugfs have also become a large source
47245+ of new vulnerabilities, ranging from infoleaks to local compromise.
47246+ There has been very little oversight with an eye toward security involved
47247+ in adding new exporters of information to these filesystems, so their
47248+ use is discouraged.
47249+ This option is equivalent to a chmod 0700 of the mount paths.
47250+
47251+config GRKERNSEC_ROFS
47252+ bool "Runtime read-only mount protection"
47253+ help
47254+ If you say Y here, a sysctl option with name "romount_protect" will
47255+ be created. By setting this option to 1 at runtime, filesystems
47256+ will be protected in the following ways:
47257+ * No new writable mounts will be allowed
47258+ * Existing read-only mounts won't be able to be remounted read/write
47259+ * Write operations will be denied on all block devices
47260+ This option acts independently of grsec_lock: once it is set to 1,
47261+ it cannot be turned off. Therefore, please be mindful of the resulting
47262+ behavior if this option is enabled in an init script on a read-only
47263+ filesystem. This feature is mainly intended for secure embedded systems.
47264+
47265+config GRKERNSEC_CHROOT
47266+ bool "Chroot jail restrictions"
47267+ help
47268+ If you say Y here, you will be able to choose several options that will
47269+ make breaking out of a chrooted jail much more difficult. If you
47270+ encounter no software incompatibilities with the following options, it
47271+ is recommended that you enable each one.
47272+
47273+config GRKERNSEC_CHROOT_MOUNT
47274+ bool "Deny mounts"
47275+ depends on GRKERNSEC_CHROOT
47276+ help
47277+ If you say Y here, processes inside a chroot will not be able to
47278+ mount or remount filesystems. If the sysctl option is enabled, a
47279+ sysctl option with name "chroot_deny_mount" is created.
47280+
47281+config GRKERNSEC_CHROOT_DOUBLE
47282+ bool "Deny double-chroots"
47283+ depends on GRKERNSEC_CHROOT
47284+ help
47285+ If you say Y here, processes inside a chroot will not be able to chroot
47286+ again outside the chroot. This is a widely used method of breaking
47287+ out of a chroot jail and should not be allowed. If the sysctl
47288+ option is enabled, a sysctl option with name
47289+ "chroot_deny_chroot" is created.
47290+
47291+config GRKERNSEC_CHROOT_PIVOT
47292+ bool "Deny pivot_root in chroot"
47293+ depends on GRKERNSEC_CHROOT
47294+ help
47295+ If you say Y here, processes inside a chroot will not be able to use
47296+ a function called pivot_root() that was introduced in Linux 2.3.41. It
47297+ works similar to chroot in that it changes the root filesystem. This
47298+ function could be misused in a chrooted process to attempt to break out
47299+ of the chroot, and therefore should not be allowed. If the sysctl
47300+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
47301+ created.
47302+
47303+config GRKERNSEC_CHROOT_CHDIR
47304+ bool "Enforce chdir(\"/\") on all chroots"
47305+ depends on GRKERNSEC_CHROOT
47306+ help
47307+ If you say Y here, the current working directory of all newly-chrooted
47308+ applications will be set to the the root directory of the chroot.
47309+ The man page on chroot(2) states:
47310+ Note that this call does not change the current working
47311+ directory, so that `.' can be outside the tree rooted at
47312+ `/'. In particular, the super-user can escape from a
47313+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47314+
47315+ It is recommended that you say Y here, since it's not known to break
47316+ any software. If the sysctl option is enabled, a sysctl option with
47317+ name "chroot_enforce_chdir" is created.
47318+
47319+config GRKERNSEC_CHROOT_CHMOD
47320+ bool "Deny (f)chmod +s"
47321+ depends on GRKERNSEC_CHROOT
47322+ help
47323+ If you say Y here, processes inside a chroot will not be able to chmod
47324+ or fchmod files to make them have suid or sgid bits. This protects
47325+ against another published method of breaking a chroot. If the sysctl
47326+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
47327+ created.
47328+
47329+config GRKERNSEC_CHROOT_FCHDIR
47330+ bool "Deny fchdir out of chroot"
47331+ depends on GRKERNSEC_CHROOT
47332+ help
47333+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
47334+ to a file descriptor of the chrooting process that points to a directory
47335+ outside the filesystem will be stopped. If the sysctl option
47336+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47337+
47338+config GRKERNSEC_CHROOT_MKNOD
47339+ bool "Deny mknod"
47340+ depends on GRKERNSEC_CHROOT
47341+ help
47342+ If you say Y here, processes inside a chroot will not be allowed to
47343+ mknod. The problem with using mknod inside a chroot is that it
47344+ would allow an attacker to create a device entry that is the same
47345+ as one on the physical root of your system, which could range from
47346+ anything from the console device to a device for your harddrive (which
47347+ they could then use to wipe the drive or steal data). It is recommended
47348+ that you say Y here, unless you run into software incompatibilities.
47349+ If the sysctl option is enabled, a sysctl option with name
47350+ "chroot_deny_mknod" is created.
47351+
47352+config GRKERNSEC_CHROOT_SHMAT
47353+ bool "Deny shmat() out of chroot"
47354+ depends on GRKERNSEC_CHROOT
47355+ help
47356+ If you say Y here, processes inside a chroot will not be able to attach
47357+ to shared memory segments that were created outside of the chroot jail.
47358+ It is recommended that you say Y here. If the sysctl option is enabled,
47359+ a sysctl option with name "chroot_deny_shmat" is created.
47360+
47361+config GRKERNSEC_CHROOT_UNIX
47362+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
47363+ depends on GRKERNSEC_CHROOT
47364+ help
47365+ If you say Y here, processes inside a chroot will not be able to
47366+ connect to abstract (meaning not belonging to a filesystem) Unix
47367+ domain sockets that were bound outside of a chroot. It is recommended
47368+ that you say Y here. If the sysctl option is enabled, a sysctl option
47369+ with name "chroot_deny_unix" is created.
47370+
47371+config GRKERNSEC_CHROOT_FINDTASK
47372+ bool "Protect outside processes"
47373+ depends on GRKERNSEC_CHROOT
47374+ help
47375+ If you say Y here, processes inside a chroot will not be able to
47376+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47377+ getsid, or view any process outside of the chroot. If the sysctl
47378+ option is enabled, a sysctl option with name "chroot_findtask" is
47379+ created.
47380+
47381+config GRKERNSEC_CHROOT_NICE
47382+ bool "Restrict priority changes"
47383+ depends on GRKERNSEC_CHROOT
47384+ help
47385+ If you say Y here, processes inside a chroot will not be able to raise
47386+ the priority of processes in the chroot, or alter the priority of
47387+ processes outside the chroot. This provides more security than simply
47388+ removing CAP_SYS_NICE from the process' capability set. If the
47389+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47390+ is created.
47391+
47392+config GRKERNSEC_CHROOT_SYSCTL
47393+ bool "Deny sysctl writes"
47394+ depends on GRKERNSEC_CHROOT
47395+ help
47396+ If you say Y here, an attacker in a chroot will not be able to
47397+ write to sysctl entries, either by sysctl(2) or through a /proc
47398+ interface. It is strongly recommended that you say Y here. If the
47399+ sysctl option is enabled, a sysctl option with name
47400+ "chroot_deny_sysctl" is created.
47401+
47402+config GRKERNSEC_CHROOT_CAPS
47403+ bool "Capability restrictions"
47404+ depends on GRKERNSEC_CHROOT
47405+ help
47406+ If you say Y here, the capabilities on all processes within a
47407+ chroot jail will be lowered to stop module insertion, raw i/o,
47408+ system and net admin tasks, rebooting the system, modifying immutable
47409+ files, modifying IPC owned by another, and changing the system time.
47410+ This is left an option because it can break some apps. Disable this
47411+ if your chrooted apps are having problems performing those kinds of
47412+ tasks. If the sysctl option is enabled, a sysctl option with
47413+ name "chroot_caps" is created.
47414+
47415+endmenu
47416+menu "Kernel Auditing"
47417+depends on GRKERNSEC
47418+
47419+config GRKERNSEC_AUDIT_GROUP
47420+ bool "Single group for auditing"
47421+ help
47422+ If you say Y here, the exec, chdir, and (un)mount logging features
47423+ will only operate on a group you specify. This option is recommended
47424+ if you only want to watch certain users instead of having a large
47425+ amount of logs from the entire system. If the sysctl option is enabled,
47426+ a sysctl option with name "audit_group" is created.
47427+
47428+config GRKERNSEC_AUDIT_GID
47429+ int "GID for auditing"
47430+ depends on GRKERNSEC_AUDIT_GROUP
47431+ default 1007
47432+
47433+config GRKERNSEC_EXECLOG
47434+ bool "Exec logging"
47435+ help
47436+ If you say Y here, all execve() calls will be logged (since the
47437+ other exec*() calls are frontends to execve(), all execution
47438+ will be logged). Useful for shell-servers that like to keep track
47439+ of their users. If the sysctl option is enabled, a sysctl option with
47440+ name "exec_logging" is created.
47441+ WARNING: This option when enabled will produce a LOT of logs, especially
47442+ on an active system.
47443+
47444+config GRKERNSEC_RESLOG
47445+ bool "Resource logging"
47446+ help
47447+ If you say Y here, all attempts to overstep resource limits will
47448+ be logged with the resource name, the requested size, and the current
47449+ limit. It is highly recommended that you say Y here. If the sysctl
47450+ option is enabled, a sysctl option with name "resource_logging" is
47451+ created. If the RBAC system is enabled, the sysctl value is ignored.
47452+
47453+config GRKERNSEC_CHROOT_EXECLOG
47454+ bool "Log execs within chroot"
47455+ help
47456+ If you say Y here, all executions inside a chroot jail will be logged
47457+ to syslog. This can cause a large amount of logs if certain
47458+ applications (eg. djb's daemontools) are installed on the system, and
47459+ is therefore left as an option. If the sysctl option is enabled, a
47460+ sysctl option with name "chroot_execlog" is created.
47461+
47462+config GRKERNSEC_AUDIT_PTRACE
47463+ bool "Ptrace logging"
47464+ help
47465+ If you say Y here, all attempts to attach to a process via ptrace
47466+ will be logged. If the sysctl option is enabled, a sysctl option
47467+ with name "audit_ptrace" is created.
47468+
47469+config GRKERNSEC_AUDIT_CHDIR
47470+ bool "Chdir logging"
47471+ help
47472+ If you say Y here, all chdir() calls will be logged. If the sysctl
47473+ option is enabled, a sysctl option with name "audit_chdir" is created.
47474+
47475+config GRKERNSEC_AUDIT_MOUNT
47476+ bool "(Un)Mount logging"
47477+ help
47478+ If you say Y here, all mounts and unmounts will be logged. If the
47479+ sysctl option is enabled, a sysctl option with name "audit_mount" is
47480+ created.
47481+
47482+config GRKERNSEC_SIGNAL
47483+ bool "Signal logging"
47484+ help
47485+ If you say Y here, certain important signals will be logged, such as
47486+ SIGSEGV, which will as a result inform you of when a error in a program
47487+ occurred, which in some cases could mean a possible exploit attempt.
47488+ If the sysctl option is enabled, a sysctl option with name
47489+ "signal_logging" is created.
47490+
47491+config GRKERNSEC_FORKFAIL
47492+ bool "Fork failure logging"
47493+ help
47494+ If you say Y here, all failed fork() attempts will be logged.
47495+ This could suggest a fork bomb, or someone attempting to overstep
47496+ their process limit. If the sysctl option is enabled, a sysctl option
47497+ with name "forkfail_logging" is created.
47498+
47499+config GRKERNSEC_TIME
47500+ bool "Time change logging"
47501+ help
47502+ If you say Y here, any changes of the system clock will be logged.
47503+ If the sysctl option is enabled, a sysctl option with name
47504+ "timechange_logging" is created.
47505+
47506+config GRKERNSEC_PROC_IPADDR
47507+ bool "/proc/<pid>/ipaddr support"
47508+ help
47509+ If you say Y here, a new entry will be added to each /proc/<pid>
47510+ directory that contains the IP address of the person using the task.
47511+ The IP is carried across local TCP and AF_UNIX stream sockets.
47512+ This information can be useful for IDS/IPSes to perform remote response
47513+ to a local attack. The entry is readable by only the owner of the
47514+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47515+ the RBAC system), and thus does not create privacy concerns.
47516+
47517+config GRKERNSEC_RWXMAP_LOG
47518+ bool 'Denied RWX mmap/mprotect logging'
47519+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47520+ help
47521+ If you say Y here, calls to mmap() and mprotect() with explicit
47522+ usage of PROT_WRITE and PROT_EXEC together will be logged when
47523+ denied by the PAX_MPROTECT feature. If the sysctl option is
47524+ enabled, a sysctl option with name "rwxmap_logging" is created.
47525+
47526+config GRKERNSEC_AUDIT_TEXTREL
47527+ bool 'ELF text relocations logging (READ HELP)'
47528+ depends on PAX_MPROTECT
47529+ help
47530+ If you say Y here, text relocations will be logged with the filename
47531+ of the offending library or binary. The purpose of the feature is
47532+ to help Linux distribution developers get rid of libraries and
47533+ binaries that need text relocations which hinder the future progress
47534+ of PaX. Only Linux distribution developers should say Y here, and
47535+ never on a production machine, as this option creates an information
47536+ leak that could aid an attacker in defeating the randomization of
47537+ a single memory region. If the sysctl option is enabled, a sysctl
47538+ option with name "audit_textrel" is created.
47539+
47540+endmenu
47541+
47542+menu "Executable Protections"
47543+depends on GRKERNSEC
47544+
47545+config GRKERNSEC_DMESG
47546+ bool "Dmesg(8) restriction"
47547+ help
47548+ If you say Y here, non-root users will not be able to use dmesg(8)
47549+ to view up to the last 4kb of messages in the kernel's log buffer.
47550+ The kernel's log buffer often contains kernel addresses and other
47551+ identifying information useful to an attacker in fingerprinting a
47552+ system for a targeted exploit.
47553+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
47554+ created.
47555+
47556+config GRKERNSEC_HARDEN_PTRACE
47557+ bool "Deter ptrace-based process snooping"
47558+ help
47559+ If you say Y here, TTY sniffers and other malicious monitoring
47560+ programs implemented through ptrace will be defeated. If you
47561+ have been using the RBAC system, this option has already been
47562+ enabled for several years for all users, with the ability to make
47563+ fine-grained exceptions.
47564+
47565+ This option only affects the ability of non-root users to ptrace
47566+ processes that are not a descendent of the ptracing process.
47567+ This means that strace ./binary and gdb ./binary will still work,
47568+ but attaching to arbitrary processes will not. If the sysctl
47569+ option is enabled, a sysctl option with name "harden_ptrace" is
47570+ created.
47571+
47572+config GRKERNSEC_PTRACE_READEXEC
47573+ bool "Require read access to ptrace sensitive binaries"
47574+ help
47575+ If you say Y here, read permission will be required by any unprivileged
47576+ process to ptrace suid/sgid binaries. Note that the ability to
47577+ ptrace privileged binaries and retain that binary's privilege is
47578+ already not possible. This option is useful in environments that
47579+ remove the read bits (e.g. file mode 4711) from suid binaries to
47580+ prevent infoleaking of their contents. What this option adds
47581+ is consistency to the use of that file mode, as the binary could normally
47582+ be read out when run without privileges while ptracing.
47583+
47584+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
47585+ is created.
47586+
47587+config GRKERNSEC_SETXID
47588+ bool "Enforce consistent multithreaded privileges"
47589+ help
47590+ If you say Y here, a change from a root uid to a non-root uid
47591+ in a multithreaded application will cause the resulting uids,
47592+ gids, supplementary groups, and capabilities in that thread
47593+ to be propagated to the other threads of the process. In most
47594+ cases this is unnecessary, as glibc will emulate this behavior
47595+ on behalf of the application. Other libcs do not act in the
47596+ same way, allowing the other threads of the process to continue
47597+ running with root privileges. If the sysctl option is enabled,
47598+ a sysctl option with name "consistent_setxid" is created.
47599+
47600+config GRKERNSEC_TPE
47601+ bool "Trusted Path Execution (TPE)"
47602+ help
47603+ If you say Y here, you will be able to choose a gid to add to the
47604+ supplementary groups of users you want to mark as "untrusted."
47605+ These users will not be able to execute any files that are not in
47606+ root-owned directories writable only by root. If the sysctl option
47607+ is enabled, a sysctl option with name "tpe" is created.
47608+
47609+config GRKERNSEC_TPE_ALL
47610+ bool "Partially restrict all non-root users"
47611+ depends on GRKERNSEC_TPE
47612+ help
47613+ If you say Y here, all non-root users will be covered under
47614+ a weaker TPE restriction. This is separate from, and in addition to,
47615+ the main TPE options that you have selected elsewhere. Thus, if a
47616+ "trusted" GID is chosen, this restriction applies to even that GID.
47617+ Under this restriction, all non-root users will only be allowed to
47618+ execute files in directories they own that are not group or
47619+ world-writable, or in directories owned by root and writable only by
47620+ root. If the sysctl option is enabled, a sysctl option with name
47621+ "tpe_restrict_all" is created.
47622+
47623+config GRKERNSEC_TPE_INVERT
47624+ bool "Invert GID option"
47625+ depends on GRKERNSEC_TPE
47626+ help
47627+ If you say Y here, the group you specify in the TPE configuration will
47628+ decide what group TPE restrictions will be *disabled* for. This
47629+ option is useful if you want TPE restrictions to be applied to most
47630+ users on the system. If the sysctl option is enabled, a sysctl option
47631+ with name "tpe_invert" is created. Unlike other sysctl options, this
47632+ entry will default to on for backward-compatibility.
47633+
47634+config GRKERNSEC_TPE_GID
47635+ int "GID for untrusted users"
47636+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
47637+ default 1005
47638+ help
47639+ Setting this GID determines what group TPE restrictions will be
47640+ *enabled* for. If the sysctl option is enabled, a sysctl option
47641+ with name "tpe_gid" is created.
47642+
47643+config GRKERNSEC_TPE_GID
47644+ int "GID for trusted users"
47645+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
47646+ default 1005
47647+ help
47648+ Setting this GID determines what group TPE restrictions will be
47649+ *disabled* for. If the sysctl option is enabled, a sysctl option
47650+ with name "tpe_gid" is created.
47651+
47652+endmenu
47653+menu "Network Protections"
47654+depends on GRKERNSEC
47655+
47656+config GRKERNSEC_RANDNET
47657+ bool "Larger entropy pools"
47658+ help
47659+ If you say Y here, the entropy pools used for many features of Linux
47660+ and grsecurity will be doubled in size. Since several grsecurity
47661+ features use additional randomness, it is recommended that you say Y
47662+ here. Saying Y here has a similar effect as modifying
47663+ /proc/sys/kernel/random/poolsize.
47664+
47665+config GRKERNSEC_BLACKHOLE
47666+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
47667+ depends on NET
47668+ help
47669+ If you say Y here, neither TCP resets nor ICMP
47670+ destination-unreachable packets will be sent in response to packets
47671+ sent to ports for which no associated listening process exists.
47672+ This feature supports both IPV4 and IPV6 and exempts the
47673+ loopback interface from blackholing. Enabling this feature
47674+ makes a host more resilient to DoS attacks and reduces network
47675+ visibility against scanners.
47676+
47677+ The blackhole feature as-implemented is equivalent to the FreeBSD
47678+ blackhole feature, as it prevents RST responses to all packets, not
47679+ just SYNs. Under most application behavior this causes no
47680+ problems, but applications (like haproxy) may not close certain
47681+ connections in a way that cleanly terminates them on the remote
47682+ end, leaving the remote host in LAST_ACK state. Because of this
47683+ side-effect and to prevent intentional LAST_ACK DoSes, this
47684+ feature also adds automatic mitigation against such attacks.
47685+ The mitigation drastically reduces the amount of time a socket
47686+ can spend in LAST_ACK state. If you're using haproxy and not
47687+ all servers it connects to have this option enabled, consider
47688+ disabling this feature on the haproxy host.
47689+
47690+ If the sysctl option is enabled, two sysctl options with names
47691+ "ip_blackhole" and "lastack_retries" will be created.
47692+ While "ip_blackhole" takes the standard zero/non-zero on/off
47693+ toggle, "lastack_retries" uses the same kinds of values as
47694+ "tcp_retries1" and "tcp_retries2". The default value of 4
47695+ prevents a socket from lasting more than 45 seconds in LAST_ACK
47696+ state.
47697+
47698+config GRKERNSEC_SOCKET
47699+ bool "Socket restrictions"
47700+ depends on NET
47701+ help
47702+ If you say Y here, you will be able to choose from several options.
47703+ If you assign a GID on your system and add it to the supplementary
47704+ groups of users you want to restrict socket access to, this patch
47705+ will perform up to three things, based on the option(s) you choose.
47706+
47707+config GRKERNSEC_SOCKET_ALL
47708+ bool "Deny any sockets to group"
47709+ depends on GRKERNSEC_SOCKET
47710+ help
47711+ If you say Y here, you will be able to choose a GID of whose users will
47712+ be unable to connect to other hosts from your machine or run server
47713+ applications from your machine. If the sysctl option is enabled, a
47714+ sysctl option with name "socket_all" is created.
47715+
47716+config GRKERNSEC_SOCKET_ALL_GID
47717+ int "GID to deny all sockets for"
47718+ depends on GRKERNSEC_SOCKET_ALL
47719+ default 1004
47720+ help
47721+ Here you can choose the GID to disable socket access for. Remember to
47722+ add the users you want socket access disabled for to the GID
47723+ specified here. If the sysctl option is enabled, a sysctl option
47724+ with name "socket_all_gid" is created.
47725+
47726+config GRKERNSEC_SOCKET_CLIENT
47727+ bool "Deny client sockets to group"
47728+ depends on GRKERNSEC_SOCKET
47729+ help
47730+ If you say Y here, you will be able to choose a GID of whose users will
47731+ be unable to connect to other hosts from your machine, but will be
47732+ able to run servers. If this option is enabled, all users in the group
47733+ you specify will have to use passive mode when initiating ftp transfers
47734+ from the shell on your machine. If the sysctl option is enabled, a
47735+ sysctl option with name "socket_client" is created.
47736+
47737+config GRKERNSEC_SOCKET_CLIENT_GID
47738+ int "GID to deny client sockets for"
47739+ depends on GRKERNSEC_SOCKET_CLIENT
47740+ default 1003
47741+ help
47742+ Here you can choose the GID to disable client socket access for.
47743+ Remember to add the users you want client socket access disabled for to
47744+ the GID specified here. If the sysctl option is enabled, a sysctl
47745+ option with name "socket_client_gid" is created.
47746+
47747+config GRKERNSEC_SOCKET_SERVER
47748+ bool "Deny server sockets to group"
47749+ depends on GRKERNSEC_SOCKET
47750+ help
47751+ If you say Y here, you will be able to choose a GID of whose users will
47752+ be unable to run server applications from your machine. If the sysctl
47753+ option is enabled, a sysctl option with name "socket_server" is created.
47754+
47755+config GRKERNSEC_SOCKET_SERVER_GID
47756+ int "GID to deny server sockets for"
47757+ depends on GRKERNSEC_SOCKET_SERVER
47758+ default 1002
47759+ help
47760+ Here you can choose the GID to disable server socket access for.
47761+ Remember to add the users you want server socket access disabled for to
47762+ the GID specified here. If the sysctl option is enabled, a sysctl
47763+ option with name "socket_server_gid" is created.
47764+
47765+endmenu
47766+menu "Sysctl support"
47767+depends on GRKERNSEC && SYSCTL
47768+
47769+config GRKERNSEC_SYSCTL
47770+ bool "Sysctl support"
47771+ help
47772+ If you say Y here, you will be able to change the options that
47773+ grsecurity runs with at bootup, without having to recompile your
47774+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
47775+ to enable (1) or disable (0) various features. All the sysctl entries
47776+ are mutable until the "grsec_lock" entry is set to a non-zero value.
47777+ All features enabled in the kernel configuration are disabled at boot
47778+ if you do not say Y to the "Turn on features by default" option.
47779+ All options should be set at startup, and the grsec_lock entry should
47780+ be set to a non-zero value after all the options are set.
47781+ *THIS IS EXTREMELY IMPORTANT*
47782+
47783+config GRKERNSEC_SYSCTL_DISTRO
47784+ bool "Extra sysctl support for distro makers (READ HELP)"
47785+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
47786+ help
47787+ If you say Y here, additional sysctl options will be created
47788+ for features that affect processes running as root. Therefore,
47789+ it is critical when using this option that the grsec_lock entry be
47790+ enabled after boot. Only distros with prebuilt kernel packages
47791+ with this option enabled that can ensure grsec_lock is enabled
47792+ after boot should use this option.
47793+ *Failure to set grsec_lock after boot makes all grsec features
47794+ this option covers useless*
47795+
47796+ Currently this option creates the following sysctl entries:
47797+ "Disable Privileged I/O": "disable_priv_io"
47798+
47799+config GRKERNSEC_SYSCTL_ON
47800+ bool "Turn on features by default"
47801+ depends on GRKERNSEC_SYSCTL
47802+ help
47803+ If you say Y here, instead of having all features enabled in the
47804+ kernel configuration disabled at boot time, the features will be
47805+ enabled at boot time. It is recommended you say Y here unless
47806+ there is some reason you would want all sysctl-tunable features to
47807+ be disabled by default. As mentioned elsewhere, it is important
47808+ to enable the grsec_lock entry once you have finished modifying
47809+ the sysctl entries.
47810+
47811+endmenu
47812+menu "Logging Options"
47813+depends on GRKERNSEC
47814+
47815+config GRKERNSEC_FLOODTIME
47816+ int "Seconds in between log messages (minimum)"
47817+ default 10
47818+ help
47819+ This option allows you to enforce the number of seconds between
47820+ grsecurity log messages. The default should be suitable for most
47821+ people, however, if you choose to change it, choose a value small enough
47822+ to allow informative logs to be produced, but large enough to
47823+ prevent flooding.
47824+
47825+config GRKERNSEC_FLOODBURST
47826+ int "Number of messages in a burst (maximum)"
47827+ default 6
47828+ help
47829+ This option allows you to choose the maximum number of messages allowed
47830+ within the flood time interval you chose in a separate option. The
47831+ default should be suitable for most people, however if you find that
47832+ many of your logs are being interpreted as flooding, you may want to
47833+ raise this value.
47834+
47835+endmenu
47836+
47837+endmenu
47838diff --git a/grsecurity/Makefile b/grsecurity/Makefile
47839new file mode 100644
47840index 0000000..be9ae3a
47841--- /dev/null
47842+++ b/grsecurity/Makefile
47843@@ -0,0 +1,36 @@
47844+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
47845+# during 2001-2009 it has been completely redesigned by Brad Spengler
47846+# into an RBAC system
47847+#
47848+# All code in this directory and various hooks inserted throughout the kernel
47849+# are copyright Brad Spengler - Open Source Security, Inc., and released
47850+# under the GPL v2 or higher
47851+
47852+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
47853+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
47854+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
47855+
47856+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
47857+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
47858+ gracl_learn.o grsec_log.o
47859+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
47860+
47861+ifdef CONFIG_NET
47862+obj-y += grsec_sock.o
47863+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
47864+endif
47865+
47866+ifndef CONFIG_GRKERNSEC
47867+obj-y += grsec_disabled.o
47868+endif
47869+
47870+ifdef CONFIG_GRKERNSEC_HIDESYM
47871+extra-y := grsec_hidesym.o
47872+$(obj)/grsec_hidesym.o:
47873+ @-chmod -f 500 /boot
47874+ @-chmod -f 500 /lib/modules
47875+ @-chmod -f 500 /lib64/modules
47876+ @-chmod -f 500 /lib32/modules
47877+ @-chmod -f 700 .
47878+ @echo ' grsec: protected kernel image paths'
47879+endif
47880diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
47881new file mode 100644
47882index 0000000..09258e0
47883--- /dev/null
47884+++ b/grsecurity/gracl.c
47885@@ -0,0 +1,4156 @@
47886+#include <linux/kernel.h>
47887+#include <linux/module.h>
47888+#include <linux/sched.h>
47889+#include <linux/mm.h>
47890+#include <linux/file.h>
47891+#include <linux/fs.h>
47892+#include <linux/namei.h>
47893+#include <linux/mount.h>
47894+#include <linux/tty.h>
47895+#include <linux/proc_fs.h>
47896+#include <linux/lglock.h>
47897+#include <linux/slab.h>
47898+#include <linux/vmalloc.h>
47899+#include <linux/types.h>
47900+#include <linux/sysctl.h>
47901+#include <linux/netdevice.h>
47902+#include <linux/ptrace.h>
47903+#include <linux/gracl.h>
47904+#include <linux/gralloc.h>
47905+#include <linux/grsecurity.h>
47906+#include <linux/grinternal.h>
47907+#include <linux/pid_namespace.h>
47908+#include <linux/fdtable.h>
47909+#include <linux/percpu.h>
47910+
47911+#include <asm/uaccess.h>
47912+#include <asm/errno.h>
47913+#include <asm/mman.h>
47914+
47915+static struct acl_role_db acl_role_set;
47916+static struct name_db name_set;
47917+static struct inodev_db inodev_set;
47918+
47919+/* for keeping track of userspace pointers used for subjects, so we
47920+ can share references in the kernel as well
47921+*/
47922+
47923+static struct path real_root;
47924+
47925+static struct acl_subj_map_db subj_map_set;
47926+
47927+static struct acl_role_label *default_role;
47928+
47929+static struct acl_role_label *role_list;
47930+
47931+static u16 acl_sp_role_value;
47932+
47933+extern char *gr_shared_page[4];
47934+static DEFINE_MUTEX(gr_dev_mutex);
47935+DEFINE_RWLOCK(gr_inode_lock);
47936+
47937+struct gr_arg *gr_usermode;
47938+
47939+static unsigned int gr_status __read_only = GR_STATUS_INIT;
47940+
47941+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47942+extern void gr_clear_learn_entries(void);
47943+
47944+#ifdef CONFIG_GRKERNSEC_RESLOG
47945+extern void gr_log_resource(const struct task_struct *task,
47946+ const int res, const unsigned long wanted, const int gt);
47947+#endif
47948+
47949+unsigned char *gr_system_salt;
47950+unsigned char *gr_system_sum;
47951+
47952+static struct sprole_pw **acl_special_roles = NULL;
47953+static __u16 num_sprole_pws = 0;
47954+
47955+static struct acl_role_label *kernel_role = NULL;
47956+
47957+static unsigned int gr_auth_attempts = 0;
47958+static unsigned long gr_auth_expires = 0UL;
47959+
47960+#ifdef CONFIG_NET
47961+extern struct vfsmount *sock_mnt;
47962+#endif
47963+
47964+extern struct vfsmount *pipe_mnt;
47965+extern struct vfsmount *shm_mnt;
47966+#ifdef CONFIG_HUGETLBFS
47967+extern struct vfsmount *hugetlbfs_vfsmount;
47968+#endif
47969+
47970+static struct acl_object_label *fakefs_obj_rw;
47971+static struct acl_object_label *fakefs_obj_rwx;
47972+
47973+extern int gr_init_uidset(void);
47974+extern void gr_free_uidset(void);
47975+extern void gr_remove_uid(uid_t uid);
47976+extern int gr_find_uid(uid_t uid);
47977+
47978+DECLARE_BRLOCK(vfsmount_lock);
47979+
47980+__inline__ int
47981+gr_acl_is_enabled(void)
47982+{
47983+ return (gr_status & GR_READY);
47984+}
47985+
47986+#ifdef CONFIG_BTRFS_FS
47987+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47988+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47989+#endif
47990+
47991+static inline dev_t __get_dev(const struct dentry *dentry)
47992+{
47993+#ifdef CONFIG_BTRFS_FS
47994+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47995+ return get_btrfs_dev_from_inode(dentry->d_inode);
47996+ else
47997+#endif
47998+ return dentry->d_inode->i_sb->s_dev;
47999+}
48000+
48001+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48002+{
48003+ return __get_dev(dentry);
48004+}
48005+
48006+static char gr_task_roletype_to_char(struct task_struct *task)
48007+{
48008+ switch (task->role->roletype &
48009+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48010+ GR_ROLE_SPECIAL)) {
48011+ case GR_ROLE_DEFAULT:
48012+ return 'D';
48013+ case GR_ROLE_USER:
48014+ return 'U';
48015+ case GR_ROLE_GROUP:
48016+ return 'G';
48017+ case GR_ROLE_SPECIAL:
48018+ return 'S';
48019+ }
48020+
48021+ return 'X';
48022+}
48023+
48024+char gr_roletype_to_char(void)
48025+{
48026+ return gr_task_roletype_to_char(current);
48027+}
48028+
48029+__inline__ int
48030+gr_acl_tpe_check(void)
48031+{
48032+ if (unlikely(!(gr_status & GR_READY)))
48033+ return 0;
48034+ if (current->role->roletype & GR_ROLE_TPE)
48035+ return 1;
48036+ else
48037+ return 0;
48038+}
48039+
48040+int
48041+gr_handle_rawio(const struct inode *inode)
48042+{
48043+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48044+ if (inode && S_ISBLK(inode->i_mode) &&
48045+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48046+ !capable(CAP_SYS_RAWIO))
48047+ return 1;
48048+#endif
48049+ return 0;
48050+}
48051+
48052+static int
48053+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48054+{
48055+ if (likely(lena != lenb))
48056+ return 0;
48057+
48058+ return !memcmp(a, b, lena);
48059+}
48060+
48061+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48062+{
48063+ *buflen -= namelen;
48064+ if (*buflen < 0)
48065+ return -ENAMETOOLONG;
48066+ *buffer -= namelen;
48067+ memcpy(*buffer, str, namelen);
48068+ return 0;
48069+}
48070+
48071+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48072+{
48073+ return prepend(buffer, buflen, name->name, name->len);
48074+}
48075+
48076+static int prepend_path(const struct path *path, struct path *root,
48077+ char **buffer, int *buflen)
48078+{
48079+ struct dentry *dentry = path->dentry;
48080+ struct vfsmount *vfsmnt = path->mnt;
48081+ bool slash = false;
48082+ int error = 0;
48083+
48084+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48085+ struct dentry * parent;
48086+
48087+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48088+ /* Global root? */
48089+ if (vfsmnt->mnt_parent == vfsmnt) {
48090+ goto out;
48091+ }
48092+ dentry = vfsmnt->mnt_mountpoint;
48093+ vfsmnt = vfsmnt->mnt_parent;
48094+ continue;
48095+ }
48096+ parent = dentry->d_parent;
48097+ prefetch(parent);
48098+ spin_lock(&dentry->d_lock);
48099+ error = prepend_name(buffer, buflen, &dentry->d_name);
48100+ spin_unlock(&dentry->d_lock);
48101+ if (!error)
48102+ error = prepend(buffer, buflen, "/", 1);
48103+ if (error)
48104+ break;
48105+
48106+ slash = true;
48107+ dentry = parent;
48108+ }
48109+
48110+out:
48111+ if (!error && !slash)
48112+ error = prepend(buffer, buflen, "/", 1);
48113+
48114+ return error;
48115+}
48116+
48117+/* this must be called with vfsmount_lock and rename_lock held */
48118+
48119+static char *__our_d_path(const struct path *path, struct path *root,
48120+ char *buf, int buflen)
48121+{
48122+ char *res = buf + buflen;
48123+ int error;
48124+
48125+ prepend(&res, &buflen, "\0", 1);
48126+ error = prepend_path(path, root, &res, &buflen);
48127+ if (error)
48128+ return ERR_PTR(error);
48129+
48130+ return res;
48131+}
48132+
48133+static char *
48134+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48135+{
48136+ char *retval;
48137+
48138+ retval = __our_d_path(path, root, buf, buflen);
48139+ if (unlikely(IS_ERR(retval)))
48140+ retval = strcpy(buf, "<path too long>");
48141+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48142+ retval[1] = '\0';
48143+
48144+ return retval;
48145+}
48146+
48147+static char *
48148+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48149+ char *buf, int buflen)
48150+{
48151+ struct path path;
48152+ char *res;
48153+
48154+ path.dentry = (struct dentry *)dentry;
48155+ path.mnt = (struct vfsmount *)vfsmnt;
48156+
48157+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48158+ by the RBAC system */
48159+ res = gen_full_path(&path, &real_root, buf, buflen);
48160+
48161+ return res;
48162+}
48163+
48164+static char *
48165+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48166+ char *buf, int buflen)
48167+{
48168+ char *res;
48169+ struct path path;
48170+ struct path root;
48171+ struct task_struct *reaper = &init_task;
48172+
48173+ path.dentry = (struct dentry *)dentry;
48174+ path.mnt = (struct vfsmount *)vfsmnt;
48175+
48176+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48177+ get_fs_root(reaper->fs, &root);
48178+
48179+ write_seqlock(&rename_lock);
48180+ br_read_lock(vfsmount_lock);
48181+ res = gen_full_path(&path, &root, buf, buflen);
48182+ br_read_unlock(vfsmount_lock);
48183+ write_sequnlock(&rename_lock);
48184+
48185+ path_put(&root);
48186+ return res;
48187+}
48188+
48189+static char *
48190+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48191+{
48192+ char *ret;
48193+ write_seqlock(&rename_lock);
48194+ br_read_lock(vfsmount_lock);
48195+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48196+ PAGE_SIZE);
48197+ br_read_unlock(vfsmount_lock);
48198+ write_sequnlock(&rename_lock);
48199+ return ret;
48200+}
48201+
48202+static char *
48203+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48204+{
48205+ char *ret;
48206+ char *buf;
48207+ int buflen;
48208+
48209+ write_seqlock(&rename_lock);
48210+ br_read_lock(vfsmount_lock);
48211+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48212+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48213+ buflen = (int)(ret - buf);
48214+ if (buflen >= 5)
48215+ prepend(&ret, &buflen, "/proc", 5);
48216+ else
48217+ ret = strcpy(buf, "<path too long>");
48218+ br_read_unlock(vfsmount_lock);
48219+ write_sequnlock(&rename_lock);
48220+ return ret;
48221+}
48222+
48223+char *
48224+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48225+{
48226+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48227+ PAGE_SIZE);
48228+}
48229+
48230+char *
48231+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48232+{
48233+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48234+ PAGE_SIZE);
48235+}
48236+
48237+char *
48238+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48239+{
48240+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48241+ PAGE_SIZE);
48242+}
48243+
48244+char *
48245+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48246+{
48247+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48248+ PAGE_SIZE);
48249+}
48250+
48251+char *
48252+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48253+{
48254+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48255+ PAGE_SIZE);
48256+}
48257+
48258+__inline__ __u32
48259+to_gr_audit(const __u32 reqmode)
48260+{
48261+ /* masks off auditable permission flags, then shifts them to create
48262+ auditing flags, and adds the special case of append auditing if
48263+ we're requesting write */
48264+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48265+}
48266+
48267+struct acl_subject_label *
48268+lookup_subject_map(const struct acl_subject_label *userp)
48269+{
48270+ unsigned int index = shash(userp, subj_map_set.s_size);
48271+ struct subject_map *match;
48272+
48273+ match = subj_map_set.s_hash[index];
48274+
48275+ while (match && match->user != userp)
48276+ match = match->next;
48277+
48278+ if (match != NULL)
48279+ return match->kernel;
48280+ else
48281+ return NULL;
48282+}
48283+
48284+static void
48285+insert_subj_map_entry(struct subject_map *subjmap)
48286+{
48287+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48288+ struct subject_map **curr;
48289+
48290+ subjmap->prev = NULL;
48291+
48292+ curr = &subj_map_set.s_hash[index];
48293+ if (*curr != NULL)
48294+ (*curr)->prev = subjmap;
48295+
48296+ subjmap->next = *curr;
48297+ *curr = subjmap;
48298+
48299+ return;
48300+}
48301+
48302+static struct acl_role_label *
48303+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48304+ const gid_t gid)
48305+{
48306+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48307+ struct acl_role_label *match;
48308+ struct role_allowed_ip *ipp;
48309+ unsigned int x;
48310+ u32 curr_ip = task->signal->curr_ip;
48311+
48312+ task->signal->saved_ip = curr_ip;
48313+
48314+ match = acl_role_set.r_hash[index];
48315+
48316+ while (match) {
48317+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48318+ for (x = 0; x < match->domain_child_num; x++) {
48319+ if (match->domain_children[x] == uid)
48320+ goto found;
48321+ }
48322+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48323+ break;
48324+ match = match->next;
48325+ }
48326+found:
48327+ if (match == NULL) {
48328+ try_group:
48329+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48330+ match = acl_role_set.r_hash[index];
48331+
48332+ while (match) {
48333+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48334+ for (x = 0; x < match->domain_child_num; x++) {
48335+ if (match->domain_children[x] == gid)
48336+ goto found2;
48337+ }
48338+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48339+ break;
48340+ match = match->next;
48341+ }
48342+found2:
48343+ if (match == NULL)
48344+ match = default_role;
48345+ if (match->allowed_ips == NULL)
48346+ return match;
48347+ else {
48348+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48349+ if (likely
48350+ ((ntohl(curr_ip) & ipp->netmask) ==
48351+ (ntohl(ipp->addr) & ipp->netmask)))
48352+ return match;
48353+ }
48354+ match = default_role;
48355+ }
48356+ } else if (match->allowed_ips == NULL) {
48357+ return match;
48358+ } else {
48359+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48360+ if (likely
48361+ ((ntohl(curr_ip) & ipp->netmask) ==
48362+ (ntohl(ipp->addr) & ipp->netmask)))
48363+ return match;
48364+ }
48365+ goto try_group;
48366+ }
48367+
48368+ return match;
48369+}
48370+
48371+struct acl_subject_label *
48372+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48373+ const struct acl_role_label *role)
48374+{
48375+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48376+ struct acl_subject_label *match;
48377+
48378+ match = role->subj_hash[index];
48379+
48380+ while (match && (match->inode != ino || match->device != dev ||
48381+ (match->mode & GR_DELETED))) {
48382+ match = match->next;
48383+ }
48384+
48385+ if (match && !(match->mode & GR_DELETED))
48386+ return match;
48387+ else
48388+ return NULL;
48389+}
48390+
48391+struct acl_subject_label *
48392+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48393+ const struct acl_role_label *role)
48394+{
48395+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48396+ struct acl_subject_label *match;
48397+
48398+ match = role->subj_hash[index];
48399+
48400+ while (match && (match->inode != ino || match->device != dev ||
48401+ !(match->mode & GR_DELETED))) {
48402+ match = match->next;
48403+ }
48404+
48405+ if (match && (match->mode & GR_DELETED))
48406+ return match;
48407+ else
48408+ return NULL;
48409+}
48410+
48411+static struct acl_object_label *
48412+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48413+ const struct acl_subject_label *subj)
48414+{
48415+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48416+ struct acl_object_label *match;
48417+
48418+ match = subj->obj_hash[index];
48419+
48420+ while (match && (match->inode != ino || match->device != dev ||
48421+ (match->mode & GR_DELETED))) {
48422+ match = match->next;
48423+ }
48424+
48425+ if (match && !(match->mode & GR_DELETED))
48426+ return match;
48427+ else
48428+ return NULL;
48429+}
48430+
48431+static struct acl_object_label *
48432+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48433+ const struct acl_subject_label *subj)
48434+{
48435+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48436+ struct acl_object_label *match;
48437+
48438+ match = subj->obj_hash[index];
48439+
48440+ while (match && (match->inode != ino || match->device != dev ||
48441+ !(match->mode & GR_DELETED))) {
48442+ match = match->next;
48443+ }
48444+
48445+ if (match && (match->mode & GR_DELETED))
48446+ return match;
48447+
48448+ match = subj->obj_hash[index];
48449+
48450+ while (match && (match->inode != ino || match->device != dev ||
48451+ (match->mode & GR_DELETED))) {
48452+ match = match->next;
48453+ }
48454+
48455+ if (match && !(match->mode & GR_DELETED))
48456+ return match;
48457+ else
48458+ return NULL;
48459+}
48460+
48461+static struct name_entry *
48462+lookup_name_entry(const char *name)
48463+{
48464+ unsigned int len = strlen(name);
48465+ unsigned int key = full_name_hash(name, len);
48466+ unsigned int index = key % name_set.n_size;
48467+ struct name_entry *match;
48468+
48469+ match = name_set.n_hash[index];
48470+
48471+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48472+ match = match->next;
48473+
48474+ return match;
48475+}
48476+
48477+static struct name_entry *
48478+lookup_name_entry_create(const char *name)
48479+{
48480+ unsigned int len = strlen(name);
48481+ unsigned int key = full_name_hash(name, len);
48482+ unsigned int index = key % name_set.n_size;
48483+ struct name_entry *match;
48484+
48485+ match = name_set.n_hash[index];
48486+
48487+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48488+ !match->deleted))
48489+ match = match->next;
48490+
48491+ if (match && match->deleted)
48492+ return match;
48493+
48494+ match = name_set.n_hash[index];
48495+
48496+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48497+ match->deleted))
48498+ match = match->next;
48499+
48500+ if (match && !match->deleted)
48501+ return match;
48502+ else
48503+ return NULL;
48504+}
48505+
48506+static struct inodev_entry *
48507+lookup_inodev_entry(const ino_t ino, const dev_t dev)
48508+{
48509+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48510+ struct inodev_entry *match;
48511+
48512+ match = inodev_set.i_hash[index];
48513+
48514+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48515+ match = match->next;
48516+
48517+ return match;
48518+}
48519+
48520+static void
48521+insert_inodev_entry(struct inodev_entry *entry)
48522+{
48523+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48524+ inodev_set.i_size);
48525+ struct inodev_entry **curr;
48526+
48527+ entry->prev = NULL;
48528+
48529+ curr = &inodev_set.i_hash[index];
48530+ if (*curr != NULL)
48531+ (*curr)->prev = entry;
48532+
48533+ entry->next = *curr;
48534+ *curr = entry;
48535+
48536+ return;
48537+}
48538+
48539+static void
48540+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48541+{
48542+ unsigned int index =
48543+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48544+ struct acl_role_label **curr;
48545+ struct acl_role_label *tmp;
48546+
48547+ curr = &acl_role_set.r_hash[index];
48548+
48549+ /* if role was already inserted due to domains and already has
48550+ a role in the same bucket as it attached, then we need to
48551+ combine these two buckets
48552+ */
48553+ if (role->next) {
48554+ tmp = role->next;
48555+ while (tmp->next)
48556+ tmp = tmp->next;
48557+ tmp->next = *curr;
48558+ } else
48559+ role->next = *curr;
48560+ *curr = role;
48561+
48562+ return;
48563+}
48564+
48565+static void
48566+insert_acl_role_label(struct acl_role_label *role)
48567+{
48568+ int i;
48569+
48570+ if (role_list == NULL) {
48571+ role_list = role;
48572+ role->prev = NULL;
48573+ } else {
48574+ role->prev = role_list;
48575+ role_list = role;
48576+ }
48577+
48578+ /* used for hash chains */
48579+ role->next = NULL;
48580+
48581+ if (role->roletype & GR_ROLE_DOMAIN) {
48582+ for (i = 0; i < role->domain_child_num; i++)
48583+ __insert_acl_role_label(role, role->domain_children[i]);
48584+ } else
48585+ __insert_acl_role_label(role, role->uidgid);
48586+}
48587+
48588+static int
48589+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48590+{
48591+ struct name_entry **curr, *nentry;
48592+ struct inodev_entry *ientry;
48593+ unsigned int len = strlen(name);
48594+ unsigned int key = full_name_hash(name, len);
48595+ unsigned int index = key % name_set.n_size;
48596+
48597+ curr = &name_set.n_hash[index];
48598+
48599+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48600+ curr = &((*curr)->next);
48601+
48602+ if (*curr != NULL)
48603+ return 1;
48604+
48605+ nentry = acl_alloc(sizeof (struct name_entry));
48606+ if (nentry == NULL)
48607+ return 0;
48608+ ientry = acl_alloc(sizeof (struct inodev_entry));
48609+ if (ientry == NULL)
48610+ return 0;
48611+ ientry->nentry = nentry;
48612+
48613+ nentry->key = key;
48614+ nentry->name = name;
48615+ nentry->inode = inode;
48616+ nentry->device = device;
48617+ nentry->len = len;
48618+ nentry->deleted = deleted;
48619+
48620+ nentry->prev = NULL;
48621+ curr = &name_set.n_hash[index];
48622+ if (*curr != NULL)
48623+ (*curr)->prev = nentry;
48624+ nentry->next = *curr;
48625+ *curr = nentry;
48626+
48627+ /* insert us into the table searchable by inode/dev */
48628+ insert_inodev_entry(ientry);
48629+
48630+ return 1;
48631+}
48632+
48633+static void
48634+insert_acl_obj_label(struct acl_object_label *obj,
48635+ struct acl_subject_label *subj)
48636+{
48637+ unsigned int index =
48638+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48639+ struct acl_object_label **curr;
48640+
48641+
48642+ obj->prev = NULL;
48643+
48644+ curr = &subj->obj_hash[index];
48645+ if (*curr != NULL)
48646+ (*curr)->prev = obj;
48647+
48648+ obj->next = *curr;
48649+ *curr = obj;
48650+
48651+ return;
48652+}
48653+
48654+static void
48655+insert_acl_subj_label(struct acl_subject_label *obj,
48656+ struct acl_role_label *role)
48657+{
48658+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48659+ struct acl_subject_label **curr;
48660+
48661+ obj->prev = NULL;
48662+
48663+ curr = &role->subj_hash[index];
48664+ if (*curr != NULL)
48665+ (*curr)->prev = obj;
48666+
48667+ obj->next = *curr;
48668+ *curr = obj;
48669+
48670+ return;
48671+}
48672+
48673+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48674+
48675+static void *
48676+create_table(__u32 * len, int elementsize)
48677+{
48678+ unsigned int table_sizes[] = {
48679+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48680+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48681+ 4194301, 8388593, 16777213, 33554393, 67108859
48682+ };
48683+ void *newtable = NULL;
48684+ unsigned int pwr = 0;
48685+
48686+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48687+ table_sizes[pwr] <= *len)
48688+ pwr++;
48689+
48690+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48691+ return newtable;
48692+
48693+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48694+ newtable =
48695+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48696+ else
48697+ newtable = vmalloc(table_sizes[pwr] * elementsize);
48698+
48699+ *len = table_sizes[pwr];
48700+
48701+ return newtable;
48702+}
48703+
48704+static int
48705+init_variables(const struct gr_arg *arg)
48706+{
48707+ struct task_struct *reaper = &init_task;
48708+ unsigned int stacksize;
48709+
48710+ subj_map_set.s_size = arg->role_db.num_subjects;
48711+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48712+ name_set.n_size = arg->role_db.num_objects;
48713+ inodev_set.i_size = arg->role_db.num_objects;
48714+
48715+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
48716+ !name_set.n_size || !inodev_set.i_size)
48717+ return 1;
48718+
48719+ if (!gr_init_uidset())
48720+ return 1;
48721+
48722+ /* set up the stack that holds allocation info */
48723+
48724+ stacksize = arg->role_db.num_pointers + 5;
48725+
48726+ if (!acl_alloc_stack_init(stacksize))
48727+ return 1;
48728+
48729+ /* grab reference for the real root dentry and vfsmount */
48730+ get_fs_root(reaper->fs, &real_root);
48731+
48732+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48733+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48734+#endif
48735+
48736+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48737+ if (fakefs_obj_rw == NULL)
48738+ return 1;
48739+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48740+
48741+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48742+ if (fakefs_obj_rwx == NULL)
48743+ return 1;
48744+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48745+
48746+ subj_map_set.s_hash =
48747+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48748+ acl_role_set.r_hash =
48749+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48750+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48751+ inodev_set.i_hash =
48752+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48753+
48754+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48755+ !name_set.n_hash || !inodev_set.i_hash)
48756+ return 1;
48757+
48758+ memset(subj_map_set.s_hash, 0,
48759+ sizeof(struct subject_map *) * subj_map_set.s_size);
48760+ memset(acl_role_set.r_hash, 0,
48761+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
48762+ memset(name_set.n_hash, 0,
48763+ sizeof (struct name_entry *) * name_set.n_size);
48764+ memset(inodev_set.i_hash, 0,
48765+ sizeof (struct inodev_entry *) * inodev_set.i_size);
48766+
48767+ return 0;
48768+}
48769+
48770+/* free information not needed after startup
48771+ currently contains user->kernel pointer mappings for subjects
48772+*/
48773+
48774+static void
48775+free_init_variables(void)
48776+{
48777+ __u32 i;
48778+
48779+ if (subj_map_set.s_hash) {
48780+ for (i = 0; i < subj_map_set.s_size; i++) {
48781+ if (subj_map_set.s_hash[i]) {
48782+ kfree(subj_map_set.s_hash[i]);
48783+ subj_map_set.s_hash[i] = NULL;
48784+ }
48785+ }
48786+
48787+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48788+ PAGE_SIZE)
48789+ kfree(subj_map_set.s_hash);
48790+ else
48791+ vfree(subj_map_set.s_hash);
48792+ }
48793+
48794+ return;
48795+}
48796+
48797+static void
48798+free_variables(void)
48799+{
48800+ struct acl_subject_label *s;
48801+ struct acl_role_label *r;
48802+ struct task_struct *task, *task2;
48803+ unsigned int x;
48804+
48805+ gr_clear_learn_entries();
48806+
48807+ read_lock(&tasklist_lock);
48808+ do_each_thread(task2, task) {
48809+ task->acl_sp_role = 0;
48810+ task->acl_role_id = 0;
48811+ task->acl = NULL;
48812+ task->role = NULL;
48813+ } while_each_thread(task2, task);
48814+ read_unlock(&tasklist_lock);
48815+
48816+ /* release the reference to the real root dentry and vfsmount */
48817+ path_put(&real_root);
48818+
48819+ /* free all object hash tables */
48820+
48821+ FOR_EACH_ROLE_START(r)
48822+ if (r->subj_hash == NULL)
48823+ goto next_role;
48824+ FOR_EACH_SUBJECT_START(r, s, x)
48825+ if (s->obj_hash == NULL)
48826+ break;
48827+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48828+ kfree(s->obj_hash);
48829+ else
48830+ vfree(s->obj_hash);
48831+ FOR_EACH_SUBJECT_END(s, x)
48832+ FOR_EACH_NESTED_SUBJECT_START(r, s)
48833+ if (s->obj_hash == NULL)
48834+ break;
48835+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48836+ kfree(s->obj_hash);
48837+ else
48838+ vfree(s->obj_hash);
48839+ FOR_EACH_NESTED_SUBJECT_END(s)
48840+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48841+ kfree(r->subj_hash);
48842+ else
48843+ vfree(r->subj_hash);
48844+ r->subj_hash = NULL;
48845+next_role:
48846+ FOR_EACH_ROLE_END(r)
48847+
48848+ acl_free_all();
48849+
48850+ if (acl_role_set.r_hash) {
48851+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48852+ PAGE_SIZE)
48853+ kfree(acl_role_set.r_hash);
48854+ else
48855+ vfree(acl_role_set.r_hash);
48856+ }
48857+ if (name_set.n_hash) {
48858+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
48859+ PAGE_SIZE)
48860+ kfree(name_set.n_hash);
48861+ else
48862+ vfree(name_set.n_hash);
48863+ }
48864+
48865+ if (inodev_set.i_hash) {
48866+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48867+ PAGE_SIZE)
48868+ kfree(inodev_set.i_hash);
48869+ else
48870+ vfree(inodev_set.i_hash);
48871+ }
48872+
48873+ gr_free_uidset();
48874+
48875+ memset(&name_set, 0, sizeof (struct name_db));
48876+ memset(&inodev_set, 0, sizeof (struct inodev_db));
48877+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48878+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48879+
48880+ default_role = NULL;
48881+ role_list = NULL;
48882+
48883+ return;
48884+}
48885+
48886+static __u32
48887+count_user_objs(struct acl_object_label *userp)
48888+{
48889+ struct acl_object_label o_tmp;
48890+ __u32 num = 0;
48891+
48892+ while (userp) {
48893+ if (copy_from_user(&o_tmp, userp,
48894+ sizeof (struct acl_object_label)))
48895+ break;
48896+
48897+ userp = o_tmp.prev;
48898+ num++;
48899+ }
48900+
48901+ return num;
48902+}
48903+
48904+static struct acl_subject_label *
48905+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48906+
48907+static int
48908+copy_user_glob(struct acl_object_label *obj)
48909+{
48910+ struct acl_object_label *g_tmp, **guser;
48911+ unsigned int len;
48912+ char *tmp;
48913+
48914+ if (obj->globbed == NULL)
48915+ return 0;
48916+
48917+ guser = &obj->globbed;
48918+ while (*guser) {
48919+ g_tmp = (struct acl_object_label *)
48920+ acl_alloc(sizeof (struct acl_object_label));
48921+ if (g_tmp == NULL)
48922+ return -ENOMEM;
48923+
48924+ if (copy_from_user(g_tmp, *guser,
48925+ sizeof (struct acl_object_label)))
48926+ return -EFAULT;
48927+
48928+ len = strnlen_user(g_tmp->filename, PATH_MAX);
48929+
48930+ if (!len || len >= PATH_MAX)
48931+ return -EINVAL;
48932+
48933+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48934+ return -ENOMEM;
48935+
48936+ if (copy_from_user(tmp, g_tmp->filename, len))
48937+ return -EFAULT;
48938+ tmp[len-1] = '\0';
48939+ g_tmp->filename = tmp;
48940+
48941+ *guser = g_tmp;
48942+ guser = &(g_tmp->next);
48943+ }
48944+
48945+ return 0;
48946+}
48947+
48948+static int
48949+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48950+ struct acl_role_label *role)
48951+{
48952+ struct acl_object_label *o_tmp;
48953+ unsigned int len;
48954+ int ret;
48955+ char *tmp;
48956+
48957+ while (userp) {
48958+ if ((o_tmp = (struct acl_object_label *)
48959+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
48960+ return -ENOMEM;
48961+
48962+ if (copy_from_user(o_tmp, userp,
48963+ sizeof (struct acl_object_label)))
48964+ return -EFAULT;
48965+
48966+ userp = o_tmp->prev;
48967+
48968+ len = strnlen_user(o_tmp->filename, PATH_MAX);
48969+
48970+ if (!len || len >= PATH_MAX)
48971+ return -EINVAL;
48972+
48973+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48974+ return -ENOMEM;
48975+
48976+ if (copy_from_user(tmp, o_tmp->filename, len))
48977+ return -EFAULT;
48978+ tmp[len-1] = '\0';
48979+ o_tmp->filename = tmp;
48980+
48981+ insert_acl_obj_label(o_tmp, subj);
48982+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48983+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48984+ return -ENOMEM;
48985+
48986+ ret = copy_user_glob(o_tmp);
48987+ if (ret)
48988+ return ret;
48989+
48990+ if (o_tmp->nested) {
48991+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48992+ if (IS_ERR(o_tmp->nested))
48993+ return PTR_ERR(o_tmp->nested);
48994+
48995+ /* insert into nested subject list */
48996+ o_tmp->nested->next = role->hash->first;
48997+ role->hash->first = o_tmp->nested;
48998+ }
48999+ }
49000+
49001+ return 0;
49002+}
49003+
49004+static __u32
49005+count_user_subjs(struct acl_subject_label *userp)
49006+{
49007+ struct acl_subject_label s_tmp;
49008+ __u32 num = 0;
49009+
49010+ while (userp) {
49011+ if (copy_from_user(&s_tmp, userp,
49012+ sizeof (struct acl_subject_label)))
49013+ break;
49014+
49015+ userp = s_tmp.prev;
49016+ /* do not count nested subjects against this count, since
49017+ they are not included in the hash table, but are
49018+ attached to objects. We have already counted
49019+ the subjects in userspace for the allocation
49020+ stack
49021+ */
49022+ if (!(s_tmp.mode & GR_NESTED))
49023+ num++;
49024+ }
49025+
49026+ return num;
49027+}
49028+
49029+static int
49030+copy_user_allowedips(struct acl_role_label *rolep)
49031+{
49032+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49033+
49034+ ruserip = rolep->allowed_ips;
49035+
49036+ while (ruserip) {
49037+ rlast = rtmp;
49038+
49039+ if ((rtmp = (struct role_allowed_ip *)
49040+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49041+ return -ENOMEM;
49042+
49043+ if (copy_from_user(rtmp, ruserip,
49044+ sizeof (struct role_allowed_ip)))
49045+ return -EFAULT;
49046+
49047+ ruserip = rtmp->prev;
49048+
49049+ if (!rlast) {
49050+ rtmp->prev = NULL;
49051+ rolep->allowed_ips = rtmp;
49052+ } else {
49053+ rlast->next = rtmp;
49054+ rtmp->prev = rlast;
49055+ }
49056+
49057+ if (!ruserip)
49058+ rtmp->next = NULL;
49059+ }
49060+
49061+ return 0;
49062+}
49063+
49064+static int
49065+copy_user_transitions(struct acl_role_label *rolep)
49066+{
49067+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
49068+
49069+ unsigned int len;
49070+ char *tmp;
49071+
49072+ rusertp = rolep->transitions;
49073+
49074+ while (rusertp) {
49075+ rlast = rtmp;
49076+
49077+ if ((rtmp = (struct role_transition *)
49078+ acl_alloc(sizeof (struct role_transition))) == NULL)
49079+ return -ENOMEM;
49080+
49081+ if (copy_from_user(rtmp, rusertp,
49082+ sizeof (struct role_transition)))
49083+ return -EFAULT;
49084+
49085+ rusertp = rtmp->prev;
49086+
49087+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49088+
49089+ if (!len || len >= GR_SPROLE_LEN)
49090+ return -EINVAL;
49091+
49092+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49093+ return -ENOMEM;
49094+
49095+ if (copy_from_user(tmp, rtmp->rolename, len))
49096+ return -EFAULT;
49097+ tmp[len-1] = '\0';
49098+ rtmp->rolename = tmp;
49099+
49100+ if (!rlast) {
49101+ rtmp->prev = NULL;
49102+ rolep->transitions = rtmp;
49103+ } else {
49104+ rlast->next = rtmp;
49105+ rtmp->prev = rlast;
49106+ }
49107+
49108+ if (!rusertp)
49109+ rtmp->next = NULL;
49110+ }
49111+
49112+ return 0;
49113+}
49114+
49115+static struct acl_subject_label *
49116+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49117+{
49118+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49119+ unsigned int len;
49120+ char *tmp;
49121+ __u32 num_objs;
49122+ struct acl_ip_label **i_tmp, *i_utmp2;
49123+ struct gr_hash_struct ghash;
49124+ struct subject_map *subjmap;
49125+ unsigned int i_num;
49126+ int err;
49127+
49128+ s_tmp = lookup_subject_map(userp);
49129+
49130+ /* we've already copied this subject into the kernel, just return
49131+ the reference to it, and don't copy it over again
49132+ */
49133+ if (s_tmp)
49134+ return(s_tmp);
49135+
49136+ if ((s_tmp = (struct acl_subject_label *)
49137+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49138+ return ERR_PTR(-ENOMEM);
49139+
49140+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49141+ if (subjmap == NULL)
49142+ return ERR_PTR(-ENOMEM);
49143+
49144+ subjmap->user = userp;
49145+ subjmap->kernel = s_tmp;
49146+ insert_subj_map_entry(subjmap);
49147+
49148+ if (copy_from_user(s_tmp, userp,
49149+ sizeof (struct acl_subject_label)))
49150+ return ERR_PTR(-EFAULT);
49151+
49152+ len = strnlen_user(s_tmp->filename, PATH_MAX);
49153+
49154+ if (!len || len >= PATH_MAX)
49155+ return ERR_PTR(-EINVAL);
49156+
49157+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49158+ return ERR_PTR(-ENOMEM);
49159+
49160+ if (copy_from_user(tmp, s_tmp->filename, len))
49161+ return ERR_PTR(-EFAULT);
49162+ tmp[len-1] = '\0';
49163+ s_tmp->filename = tmp;
49164+
49165+ if (!strcmp(s_tmp->filename, "/"))
49166+ role->root_label = s_tmp;
49167+
49168+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49169+ return ERR_PTR(-EFAULT);
49170+
49171+ /* copy user and group transition tables */
49172+
49173+ if (s_tmp->user_trans_num) {
49174+ uid_t *uidlist;
49175+
49176+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49177+ if (uidlist == NULL)
49178+ return ERR_PTR(-ENOMEM);
49179+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49180+ return ERR_PTR(-EFAULT);
49181+
49182+ s_tmp->user_transitions = uidlist;
49183+ }
49184+
49185+ if (s_tmp->group_trans_num) {
49186+ gid_t *gidlist;
49187+
49188+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49189+ if (gidlist == NULL)
49190+ return ERR_PTR(-ENOMEM);
49191+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49192+ return ERR_PTR(-EFAULT);
49193+
49194+ s_tmp->group_transitions = gidlist;
49195+ }
49196+
49197+ /* set up object hash table */
49198+ num_objs = count_user_objs(ghash.first);
49199+
49200+ s_tmp->obj_hash_size = num_objs;
49201+ s_tmp->obj_hash =
49202+ (struct acl_object_label **)
49203+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49204+
49205+ if (!s_tmp->obj_hash)
49206+ return ERR_PTR(-ENOMEM);
49207+
49208+ memset(s_tmp->obj_hash, 0,
49209+ s_tmp->obj_hash_size *
49210+ sizeof (struct acl_object_label *));
49211+
49212+ /* add in objects */
49213+ err = copy_user_objs(ghash.first, s_tmp, role);
49214+
49215+ if (err)
49216+ return ERR_PTR(err);
49217+
49218+ /* set pointer for parent subject */
49219+ if (s_tmp->parent_subject) {
49220+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49221+
49222+ if (IS_ERR(s_tmp2))
49223+ return s_tmp2;
49224+
49225+ s_tmp->parent_subject = s_tmp2;
49226+ }
49227+
49228+ /* add in ip acls */
49229+
49230+ if (!s_tmp->ip_num) {
49231+ s_tmp->ips = NULL;
49232+ goto insert;
49233+ }
49234+
49235+ i_tmp =
49236+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49237+ sizeof (struct acl_ip_label *));
49238+
49239+ if (!i_tmp)
49240+ return ERR_PTR(-ENOMEM);
49241+
49242+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49243+ *(i_tmp + i_num) =
49244+ (struct acl_ip_label *)
49245+ acl_alloc(sizeof (struct acl_ip_label));
49246+ if (!*(i_tmp + i_num))
49247+ return ERR_PTR(-ENOMEM);
49248+
49249+ if (copy_from_user
49250+ (&i_utmp2, s_tmp->ips + i_num,
49251+ sizeof (struct acl_ip_label *)))
49252+ return ERR_PTR(-EFAULT);
49253+
49254+ if (copy_from_user
49255+ (*(i_tmp + i_num), i_utmp2,
49256+ sizeof (struct acl_ip_label)))
49257+ return ERR_PTR(-EFAULT);
49258+
49259+ if ((*(i_tmp + i_num))->iface == NULL)
49260+ continue;
49261+
49262+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49263+ if (!len || len >= IFNAMSIZ)
49264+ return ERR_PTR(-EINVAL);
49265+ tmp = acl_alloc(len);
49266+ if (tmp == NULL)
49267+ return ERR_PTR(-ENOMEM);
49268+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49269+ return ERR_PTR(-EFAULT);
49270+ (*(i_tmp + i_num))->iface = tmp;
49271+ }
49272+
49273+ s_tmp->ips = i_tmp;
49274+
49275+insert:
49276+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49277+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49278+ return ERR_PTR(-ENOMEM);
49279+
49280+ return s_tmp;
49281+}
49282+
49283+static int
49284+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49285+{
49286+ struct acl_subject_label s_pre;
49287+ struct acl_subject_label * ret;
49288+ int err;
49289+
49290+ while (userp) {
49291+ if (copy_from_user(&s_pre, userp,
49292+ sizeof (struct acl_subject_label)))
49293+ return -EFAULT;
49294+
49295+ /* do not add nested subjects here, add
49296+ while parsing objects
49297+ */
49298+
49299+ if (s_pre.mode & GR_NESTED) {
49300+ userp = s_pre.prev;
49301+ continue;
49302+ }
49303+
49304+ ret = do_copy_user_subj(userp, role);
49305+
49306+ err = PTR_ERR(ret);
49307+ if (IS_ERR(ret))
49308+ return err;
49309+
49310+ insert_acl_subj_label(ret, role);
49311+
49312+ userp = s_pre.prev;
49313+ }
49314+
49315+ return 0;
49316+}
49317+
49318+static int
49319+copy_user_acl(struct gr_arg *arg)
49320+{
49321+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49322+ struct sprole_pw *sptmp;
49323+ struct gr_hash_struct *ghash;
49324+ uid_t *domainlist;
49325+ unsigned int r_num;
49326+ unsigned int len;
49327+ char *tmp;
49328+ int err = 0;
49329+ __u16 i;
49330+ __u32 num_subjs;
49331+
49332+ /* we need a default and kernel role */
49333+ if (arg->role_db.num_roles < 2)
49334+ return -EINVAL;
49335+
49336+ /* copy special role authentication info from userspace */
49337+
49338+ num_sprole_pws = arg->num_sprole_pws;
49339+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49340+
49341+ if (!acl_special_roles) {
49342+ err = -ENOMEM;
49343+ goto cleanup;
49344+ }
49345+
49346+ for (i = 0; i < num_sprole_pws; i++) {
49347+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49348+ if (!sptmp) {
49349+ err = -ENOMEM;
49350+ goto cleanup;
49351+ }
49352+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49353+ sizeof (struct sprole_pw))) {
49354+ err = -EFAULT;
49355+ goto cleanup;
49356+ }
49357+
49358+ len =
49359+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49360+
49361+ if (!len || len >= GR_SPROLE_LEN) {
49362+ err = -EINVAL;
49363+ goto cleanup;
49364+ }
49365+
49366+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49367+ err = -ENOMEM;
49368+ goto cleanup;
49369+ }
49370+
49371+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49372+ err = -EFAULT;
49373+ goto cleanup;
49374+ }
49375+ tmp[len-1] = '\0';
49376+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49377+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49378+#endif
49379+ sptmp->rolename = tmp;
49380+ acl_special_roles[i] = sptmp;
49381+ }
49382+
49383+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49384+
49385+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49386+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49387+
49388+ if (!r_tmp) {
49389+ err = -ENOMEM;
49390+ goto cleanup;
49391+ }
49392+
49393+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49394+ sizeof (struct acl_role_label *))) {
49395+ err = -EFAULT;
49396+ goto cleanup;
49397+ }
49398+
49399+ if (copy_from_user(r_tmp, r_utmp2,
49400+ sizeof (struct acl_role_label))) {
49401+ err = -EFAULT;
49402+ goto cleanup;
49403+ }
49404+
49405+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49406+
49407+ if (!len || len >= PATH_MAX) {
49408+ err = -EINVAL;
49409+ goto cleanup;
49410+ }
49411+
49412+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49413+ err = -ENOMEM;
49414+ goto cleanup;
49415+ }
49416+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49417+ err = -EFAULT;
49418+ goto cleanup;
49419+ }
49420+ tmp[len-1] = '\0';
49421+ r_tmp->rolename = tmp;
49422+
49423+ if (!strcmp(r_tmp->rolename, "default")
49424+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49425+ default_role = r_tmp;
49426+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49427+ kernel_role = r_tmp;
49428+ }
49429+
49430+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49431+ err = -ENOMEM;
49432+ goto cleanup;
49433+ }
49434+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49435+ err = -EFAULT;
49436+ goto cleanup;
49437+ }
49438+
49439+ r_tmp->hash = ghash;
49440+
49441+ num_subjs = count_user_subjs(r_tmp->hash->first);
49442+
49443+ r_tmp->subj_hash_size = num_subjs;
49444+ r_tmp->subj_hash =
49445+ (struct acl_subject_label **)
49446+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49447+
49448+ if (!r_tmp->subj_hash) {
49449+ err = -ENOMEM;
49450+ goto cleanup;
49451+ }
49452+
49453+ err = copy_user_allowedips(r_tmp);
49454+ if (err)
49455+ goto cleanup;
49456+
49457+ /* copy domain info */
49458+ if (r_tmp->domain_children != NULL) {
49459+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49460+ if (domainlist == NULL) {
49461+ err = -ENOMEM;
49462+ goto cleanup;
49463+ }
49464+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49465+ err = -EFAULT;
49466+ goto cleanup;
49467+ }
49468+ r_tmp->domain_children = domainlist;
49469+ }
49470+
49471+ err = copy_user_transitions(r_tmp);
49472+ if (err)
49473+ goto cleanup;
49474+
49475+ memset(r_tmp->subj_hash, 0,
49476+ r_tmp->subj_hash_size *
49477+ sizeof (struct acl_subject_label *));
49478+
49479+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49480+
49481+ if (err)
49482+ goto cleanup;
49483+
49484+ /* set nested subject list to null */
49485+ r_tmp->hash->first = NULL;
49486+
49487+ insert_acl_role_label(r_tmp);
49488+ }
49489+
49490+ goto return_err;
49491+ cleanup:
49492+ free_variables();
49493+ return_err:
49494+ return err;
49495+
49496+}
49497+
49498+static int
49499+gracl_init(struct gr_arg *args)
49500+{
49501+ int error = 0;
49502+
49503+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49504+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49505+
49506+ if (init_variables(args)) {
49507+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49508+ error = -ENOMEM;
49509+ free_variables();
49510+ goto out;
49511+ }
49512+
49513+ error = copy_user_acl(args);
49514+ free_init_variables();
49515+ if (error) {
49516+ free_variables();
49517+ goto out;
49518+ }
49519+
49520+ if ((error = gr_set_acls(0))) {
49521+ free_variables();
49522+ goto out;
49523+ }
49524+
49525+ pax_open_kernel();
49526+ gr_status |= GR_READY;
49527+ pax_close_kernel();
49528+
49529+ out:
49530+ return error;
49531+}
49532+
49533+/* derived from glibc fnmatch() 0: match, 1: no match*/
49534+
49535+static int
49536+glob_match(const char *p, const char *n)
49537+{
49538+ char c;
49539+
49540+ while ((c = *p++) != '\0') {
49541+ switch (c) {
49542+ case '?':
49543+ if (*n == '\0')
49544+ return 1;
49545+ else if (*n == '/')
49546+ return 1;
49547+ break;
49548+ case '\\':
49549+ if (*n != c)
49550+ return 1;
49551+ break;
49552+ case '*':
49553+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49554+ if (*n == '/')
49555+ return 1;
49556+ else if (c == '?') {
49557+ if (*n == '\0')
49558+ return 1;
49559+ else
49560+ ++n;
49561+ }
49562+ }
49563+ if (c == '\0') {
49564+ return 0;
49565+ } else {
49566+ const char *endp;
49567+
49568+ if ((endp = strchr(n, '/')) == NULL)
49569+ endp = n + strlen(n);
49570+
49571+ if (c == '[') {
49572+ for (--p; n < endp; ++n)
49573+ if (!glob_match(p, n))
49574+ return 0;
49575+ } else if (c == '/') {
49576+ while (*n != '\0' && *n != '/')
49577+ ++n;
49578+ if (*n == '/' && !glob_match(p, n + 1))
49579+ return 0;
49580+ } else {
49581+ for (--p; n < endp; ++n)
49582+ if (*n == c && !glob_match(p, n))
49583+ return 0;
49584+ }
49585+
49586+ return 1;
49587+ }
49588+ case '[':
49589+ {
49590+ int not;
49591+ char cold;
49592+
49593+ if (*n == '\0' || *n == '/')
49594+ return 1;
49595+
49596+ not = (*p == '!' || *p == '^');
49597+ if (not)
49598+ ++p;
49599+
49600+ c = *p++;
49601+ for (;;) {
49602+ unsigned char fn = (unsigned char)*n;
49603+
49604+ if (c == '\0')
49605+ return 1;
49606+ else {
49607+ if (c == fn)
49608+ goto matched;
49609+ cold = c;
49610+ c = *p++;
49611+
49612+ if (c == '-' && *p != ']') {
49613+ unsigned char cend = *p++;
49614+
49615+ if (cend == '\0')
49616+ return 1;
49617+
49618+ if (cold <= fn && fn <= cend)
49619+ goto matched;
49620+
49621+ c = *p++;
49622+ }
49623+ }
49624+
49625+ if (c == ']')
49626+ break;
49627+ }
49628+ if (!not)
49629+ return 1;
49630+ break;
49631+ matched:
49632+ while (c != ']') {
49633+ if (c == '\0')
49634+ return 1;
49635+
49636+ c = *p++;
49637+ }
49638+ if (not)
49639+ return 1;
49640+ }
49641+ break;
49642+ default:
49643+ if (c != *n)
49644+ return 1;
49645+ }
49646+
49647+ ++n;
49648+ }
49649+
49650+ if (*n == '\0')
49651+ return 0;
49652+
49653+ if (*n == '/')
49654+ return 0;
49655+
49656+ return 1;
49657+}
49658+
49659+static struct acl_object_label *
49660+chk_glob_label(struct acl_object_label *globbed,
49661+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49662+{
49663+ struct acl_object_label *tmp;
49664+
49665+ if (*path == NULL)
49666+ *path = gr_to_filename_nolock(dentry, mnt);
49667+
49668+ tmp = globbed;
49669+
49670+ while (tmp) {
49671+ if (!glob_match(tmp->filename, *path))
49672+ return tmp;
49673+ tmp = tmp->next;
49674+ }
49675+
49676+ return NULL;
49677+}
49678+
49679+static struct acl_object_label *
49680+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49681+ const ino_t curr_ino, const dev_t curr_dev,
49682+ const struct acl_subject_label *subj, char **path, const int checkglob)
49683+{
49684+ struct acl_subject_label *tmpsubj;
49685+ struct acl_object_label *retval;
49686+ struct acl_object_label *retval2;
49687+
49688+ tmpsubj = (struct acl_subject_label *) subj;
49689+ read_lock(&gr_inode_lock);
49690+ do {
49691+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49692+ if (retval) {
49693+ if (checkglob && retval->globbed) {
49694+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49695+ (struct vfsmount *)orig_mnt, path);
49696+ if (retval2)
49697+ retval = retval2;
49698+ }
49699+ break;
49700+ }
49701+ } while ((tmpsubj = tmpsubj->parent_subject));
49702+ read_unlock(&gr_inode_lock);
49703+
49704+ return retval;
49705+}
49706+
49707+static __inline__ struct acl_object_label *
49708+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49709+ struct dentry *curr_dentry,
49710+ const struct acl_subject_label *subj, char **path, const int checkglob)
49711+{
49712+ int newglob = checkglob;
49713+ ino_t inode;
49714+ dev_t device;
49715+
49716+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49717+ as we don't want a / * rule to match instead of the / object
49718+ don't do this for create lookups that call this function though, since they're looking up
49719+ on the parent and thus need globbing checks on all paths
49720+ */
49721+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49722+ newglob = GR_NO_GLOB;
49723+
49724+ spin_lock(&curr_dentry->d_lock);
49725+ inode = curr_dentry->d_inode->i_ino;
49726+ device = __get_dev(curr_dentry);
49727+ spin_unlock(&curr_dentry->d_lock);
49728+
49729+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49730+}
49731+
49732+static struct acl_object_label *
49733+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49734+ const struct acl_subject_label *subj, char *path, const int checkglob)
49735+{
49736+ struct dentry *dentry = (struct dentry *) l_dentry;
49737+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49738+ struct acl_object_label *retval;
49739+ struct dentry *parent;
49740+
49741+ write_seqlock(&rename_lock);
49742+ br_read_lock(vfsmount_lock);
49743+
49744+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49745+#ifdef CONFIG_NET
49746+ mnt == sock_mnt ||
49747+#endif
49748+#ifdef CONFIG_HUGETLBFS
49749+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49750+#endif
49751+ /* ignore Eric Biederman */
49752+ IS_PRIVATE(l_dentry->d_inode))) {
49753+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49754+ goto out;
49755+ }
49756+
49757+ for (;;) {
49758+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49759+ break;
49760+
49761+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49762+ if (mnt->mnt_parent == mnt)
49763+ break;
49764+
49765+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49766+ if (retval != NULL)
49767+ goto out;
49768+
49769+ dentry = mnt->mnt_mountpoint;
49770+ mnt = mnt->mnt_parent;
49771+ continue;
49772+ }
49773+
49774+ parent = dentry->d_parent;
49775+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49776+ if (retval != NULL)
49777+ goto out;
49778+
49779+ dentry = parent;
49780+ }
49781+
49782+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49783+
49784+ /* real_root is pinned so we don't have to hold a reference */
49785+ if (retval == NULL)
49786+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
49787+out:
49788+ br_read_unlock(vfsmount_lock);
49789+ write_sequnlock(&rename_lock);
49790+
49791+ BUG_ON(retval == NULL);
49792+
49793+ return retval;
49794+}
49795+
49796+static __inline__ struct acl_object_label *
49797+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49798+ const struct acl_subject_label *subj)
49799+{
49800+ char *path = NULL;
49801+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49802+}
49803+
49804+static __inline__ struct acl_object_label *
49805+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49806+ const struct acl_subject_label *subj)
49807+{
49808+ char *path = NULL;
49809+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49810+}
49811+
49812+static __inline__ struct acl_object_label *
49813+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49814+ const struct acl_subject_label *subj, char *path)
49815+{
49816+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49817+}
49818+
49819+static struct acl_subject_label *
49820+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49821+ const struct acl_role_label *role)
49822+{
49823+ struct dentry *dentry = (struct dentry *) l_dentry;
49824+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49825+ struct acl_subject_label *retval;
49826+ struct dentry *parent;
49827+
49828+ write_seqlock(&rename_lock);
49829+ br_read_lock(vfsmount_lock);
49830+
49831+ for (;;) {
49832+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49833+ break;
49834+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49835+ if (mnt->mnt_parent == mnt)
49836+ break;
49837+
49838+ spin_lock(&dentry->d_lock);
49839+ read_lock(&gr_inode_lock);
49840+ retval =
49841+ lookup_acl_subj_label(dentry->d_inode->i_ino,
49842+ __get_dev(dentry), role);
49843+ read_unlock(&gr_inode_lock);
49844+ spin_unlock(&dentry->d_lock);
49845+ if (retval != NULL)
49846+ goto out;
49847+
49848+ dentry = mnt->mnt_mountpoint;
49849+ mnt = mnt->mnt_parent;
49850+ continue;
49851+ }
49852+
49853+ spin_lock(&dentry->d_lock);
49854+ read_lock(&gr_inode_lock);
49855+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49856+ __get_dev(dentry), role);
49857+ read_unlock(&gr_inode_lock);
49858+ parent = dentry->d_parent;
49859+ spin_unlock(&dentry->d_lock);
49860+
49861+ if (retval != NULL)
49862+ goto out;
49863+
49864+ dentry = parent;
49865+ }
49866+
49867+ spin_lock(&dentry->d_lock);
49868+ read_lock(&gr_inode_lock);
49869+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49870+ __get_dev(dentry), role);
49871+ read_unlock(&gr_inode_lock);
49872+ spin_unlock(&dentry->d_lock);
49873+
49874+ if (unlikely(retval == NULL)) {
49875+ /* real_root is pinned, we don't need to hold a reference */
49876+ read_lock(&gr_inode_lock);
49877+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
49878+ __get_dev(real_root.dentry), role);
49879+ read_unlock(&gr_inode_lock);
49880+ }
49881+out:
49882+ br_read_unlock(vfsmount_lock);
49883+ write_sequnlock(&rename_lock);
49884+
49885+ BUG_ON(retval == NULL);
49886+
49887+ return retval;
49888+}
49889+
49890+static void
49891+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49892+{
49893+ struct task_struct *task = current;
49894+ const struct cred *cred = current_cred();
49895+
49896+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49897+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49898+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49899+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49900+
49901+ return;
49902+}
49903+
49904+static void
49905+gr_log_learn_sysctl(const char *path, const __u32 mode)
49906+{
49907+ struct task_struct *task = current;
49908+ const struct cred *cred = current_cred();
49909+
49910+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49911+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49912+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49913+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49914+
49915+ return;
49916+}
49917+
49918+static void
49919+gr_log_learn_id_change(const char type, const unsigned int real,
49920+ const unsigned int effective, const unsigned int fs)
49921+{
49922+ struct task_struct *task = current;
49923+ const struct cred *cred = current_cred();
49924+
49925+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49926+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49927+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49928+ type, real, effective, fs, &task->signal->saved_ip);
49929+
49930+ return;
49931+}
49932+
49933+__u32
49934+gr_search_file(const struct dentry * dentry, const __u32 mode,
49935+ const struct vfsmount * mnt)
49936+{
49937+ __u32 retval = mode;
49938+ struct acl_subject_label *curracl;
49939+ struct acl_object_label *currobj;
49940+
49941+ if (unlikely(!(gr_status & GR_READY)))
49942+ return (mode & ~GR_AUDITS);
49943+
49944+ curracl = current->acl;
49945+
49946+ currobj = chk_obj_label(dentry, mnt, curracl);
49947+ retval = currobj->mode & mode;
49948+
49949+ /* if we're opening a specified transfer file for writing
49950+ (e.g. /dev/initctl), then transfer our role to init
49951+ */
49952+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49953+ current->role->roletype & GR_ROLE_PERSIST)) {
49954+ struct task_struct *task = init_pid_ns.child_reaper;
49955+
49956+ if (task->role != current->role) {
49957+ task->acl_sp_role = 0;
49958+ task->acl_role_id = current->acl_role_id;
49959+ task->role = current->role;
49960+ rcu_read_lock();
49961+ read_lock(&grsec_exec_file_lock);
49962+ gr_apply_subject_to_task(task);
49963+ read_unlock(&grsec_exec_file_lock);
49964+ rcu_read_unlock();
49965+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49966+ }
49967+ }
49968+
49969+ if (unlikely
49970+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49971+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49972+ __u32 new_mode = mode;
49973+
49974+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49975+
49976+ retval = new_mode;
49977+
49978+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49979+ new_mode |= GR_INHERIT;
49980+
49981+ if (!(mode & GR_NOLEARN))
49982+ gr_log_learn(dentry, mnt, new_mode);
49983+ }
49984+
49985+ return retval;
49986+}
49987+
49988+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
49989+ const struct dentry *parent,
49990+ const struct vfsmount *mnt)
49991+{
49992+ struct name_entry *match;
49993+ struct acl_object_label *matchpo;
49994+ struct acl_subject_label *curracl;
49995+ char *path;
49996+
49997+ if (unlikely(!(gr_status & GR_READY)))
49998+ return NULL;
49999+
50000+ preempt_disable();
50001+ path = gr_to_filename_rbac(new_dentry, mnt);
50002+ match = lookup_name_entry_create(path);
50003+
50004+ curracl = current->acl;
50005+
50006+ if (match) {
50007+ read_lock(&gr_inode_lock);
50008+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50009+ read_unlock(&gr_inode_lock);
50010+
50011+ if (matchpo) {
50012+ preempt_enable();
50013+ return matchpo;
50014+ }
50015+ }
50016+
50017+ // lookup parent
50018+
50019+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50020+
50021+ preempt_enable();
50022+ return matchpo;
50023+}
50024+
50025+__u32
50026+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50027+ const struct vfsmount * mnt, const __u32 mode)
50028+{
50029+ struct acl_object_label *matchpo;
50030+ __u32 retval;
50031+
50032+ if (unlikely(!(gr_status & GR_READY)))
50033+ return (mode & ~GR_AUDITS);
50034+
50035+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
50036+
50037+ retval = matchpo->mode & mode;
50038+
50039+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50040+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50041+ __u32 new_mode = mode;
50042+
50043+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50044+
50045+ gr_log_learn(new_dentry, mnt, new_mode);
50046+ return new_mode;
50047+ }
50048+
50049+ return retval;
50050+}
50051+
50052+__u32
50053+gr_check_link(const struct dentry * new_dentry,
50054+ const struct dentry * parent_dentry,
50055+ const struct vfsmount * parent_mnt,
50056+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50057+{
50058+ struct acl_object_label *obj;
50059+ __u32 oldmode, newmode;
50060+ __u32 needmode;
50061+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50062+ GR_DELETE | GR_INHERIT;
50063+
50064+ if (unlikely(!(gr_status & GR_READY)))
50065+ return (GR_CREATE | GR_LINK);
50066+
50067+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50068+ oldmode = obj->mode;
50069+
50070+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50071+ newmode = obj->mode;
50072+
50073+ needmode = newmode & checkmodes;
50074+
50075+ // old name for hardlink must have at least the permissions of the new name
50076+ if ((oldmode & needmode) != needmode)
50077+ goto bad;
50078+
50079+ // if old name had restrictions/auditing, make sure the new name does as well
50080+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50081+
50082+ // don't allow hardlinking of suid/sgid files without permission
50083+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50084+ needmode |= GR_SETID;
50085+
50086+ if ((newmode & needmode) != needmode)
50087+ goto bad;
50088+
50089+ // enforce minimum permissions
50090+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50091+ return newmode;
50092+bad:
50093+ needmode = oldmode;
50094+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50095+ needmode |= GR_SETID;
50096+
50097+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50098+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50099+ return (GR_CREATE | GR_LINK);
50100+ } else if (newmode & GR_SUPPRESS)
50101+ return GR_SUPPRESS;
50102+ else
50103+ return 0;
50104+}
50105+
50106+int
50107+gr_check_hidden_task(const struct task_struct *task)
50108+{
50109+ if (unlikely(!(gr_status & GR_READY)))
50110+ return 0;
50111+
50112+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50113+ return 1;
50114+
50115+ return 0;
50116+}
50117+
50118+int
50119+gr_check_protected_task(const struct task_struct *task)
50120+{
50121+ if (unlikely(!(gr_status & GR_READY) || !task))
50122+ return 0;
50123+
50124+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50125+ task->acl != current->acl)
50126+ return 1;
50127+
50128+ return 0;
50129+}
50130+
50131+int
50132+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50133+{
50134+ struct task_struct *p;
50135+ int ret = 0;
50136+
50137+ if (unlikely(!(gr_status & GR_READY) || !pid))
50138+ return ret;
50139+
50140+ read_lock(&tasklist_lock);
50141+ do_each_pid_task(pid, type, p) {
50142+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50143+ p->acl != current->acl) {
50144+ ret = 1;
50145+ goto out;
50146+ }
50147+ } while_each_pid_task(pid, type, p);
50148+out:
50149+ read_unlock(&tasklist_lock);
50150+
50151+ return ret;
50152+}
50153+
50154+void
50155+gr_copy_label(struct task_struct *tsk)
50156+{
50157+ tsk->signal->used_accept = 0;
50158+ tsk->acl_sp_role = 0;
50159+ tsk->acl_role_id = current->acl_role_id;
50160+ tsk->acl = current->acl;
50161+ tsk->role = current->role;
50162+ tsk->signal->curr_ip = current->signal->curr_ip;
50163+ tsk->signal->saved_ip = current->signal->saved_ip;
50164+ if (current->exec_file)
50165+ get_file(current->exec_file);
50166+ tsk->exec_file = current->exec_file;
50167+ tsk->is_writable = current->is_writable;
50168+ if (unlikely(current->signal->used_accept)) {
50169+ current->signal->curr_ip = 0;
50170+ current->signal->saved_ip = 0;
50171+ }
50172+
50173+ return;
50174+}
50175+
50176+static void
50177+gr_set_proc_res(struct task_struct *task)
50178+{
50179+ struct acl_subject_label *proc;
50180+ unsigned short i;
50181+
50182+ proc = task->acl;
50183+
50184+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50185+ return;
50186+
50187+ for (i = 0; i < RLIM_NLIMITS; i++) {
50188+ if (!(proc->resmask & (1 << i)))
50189+ continue;
50190+
50191+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50192+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50193+ }
50194+
50195+ return;
50196+}
50197+
50198+extern int __gr_process_user_ban(struct user_struct *user);
50199+
50200+int
50201+gr_check_user_change(int real, int effective, int fs)
50202+{
50203+ unsigned int i;
50204+ __u16 num;
50205+ uid_t *uidlist;
50206+ int curuid;
50207+ int realok = 0;
50208+ int effectiveok = 0;
50209+ int fsok = 0;
50210+
50211+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50212+ struct user_struct *user;
50213+
50214+ if (real == -1)
50215+ goto skipit;
50216+
50217+ user = find_user(real);
50218+ if (user == NULL)
50219+ goto skipit;
50220+
50221+ if (__gr_process_user_ban(user)) {
50222+ /* for find_user */
50223+ free_uid(user);
50224+ return 1;
50225+ }
50226+
50227+ /* for find_user */
50228+ free_uid(user);
50229+
50230+skipit:
50231+#endif
50232+
50233+ if (unlikely(!(gr_status & GR_READY)))
50234+ return 0;
50235+
50236+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50237+ gr_log_learn_id_change('u', real, effective, fs);
50238+
50239+ num = current->acl->user_trans_num;
50240+ uidlist = current->acl->user_transitions;
50241+
50242+ if (uidlist == NULL)
50243+ return 0;
50244+
50245+ if (real == -1)
50246+ realok = 1;
50247+ if (effective == -1)
50248+ effectiveok = 1;
50249+ if (fs == -1)
50250+ fsok = 1;
50251+
50252+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
50253+ for (i = 0; i < num; i++) {
50254+ curuid = (int)uidlist[i];
50255+ if (real == curuid)
50256+ realok = 1;
50257+ if (effective == curuid)
50258+ effectiveok = 1;
50259+ if (fs == curuid)
50260+ fsok = 1;
50261+ }
50262+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
50263+ for (i = 0; i < num; i++) {
50264+ curuid = (int)uidlist[i];
50265+ if (real == curuid)
50266+ break;
50267+ if (effective == curuid)
50268+ break;
50269+ if (fs == curuid)
50270+ break;
50271+ }
50272+ /* not in deny list */
50273+ if (i == num) {
50274+ realok = 1;
50275+ effectiveok = 1;
50276+ fsok = 1;
50277+ }
50278+ }
50279+
50280+ if (realok && effectiveok && fsok)
50281+ return 0;
50282+ else {
50283+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50284+ return 1;
50285+ }
50286+}
50287+
50288+int
50289+gr_check_group_change(int real, int effective, int fs)
50290+{
50291+ unsigned int i;
50292+ __u16 num;
50293+ gid_t *gidlist;
50294+ int curgid;
50295+ int realok = 0;
50296+ int effectiveok = 0;
50297+ int fsok = 0;
50298+
50299+ if (unlikely(!(gr_status & GR_READY)))
50300+ return 0;
50301+
50302+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50303+ gr_log_learn_id_change('g', real, effective, fs);
50304+
50305+ num = current->acl->group_trans_num;
50306+ gidlist = current->acl->group_transitions;
50307+
50308+ if (gidlist == NULL)
50309+ return 0;
50310+
50311+ if (real == -1)
50312+ realok = 1;
50313+ if (effective == -1)
50314+ effectiveok = 1;
50315+ if (fs == -1)
50316+ fsok = 1;
50317+
50318+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
50319+ for (i = 0; i < num; i++) {
50320+ curgid = (int)gidlist[i];
50321+ if (real == curgid)
50322+ realok = 1;
50323+ if (effective == curgid)
50324+ effectiveok = 1;
50325+ if (fs == curgid)
50326+ fsok = 1;
50327+ }
50328+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
50329+ for (i = 0; i < num; i++) {
50330+ curgid = (int)gidlist[i];
50331+ if (real == curgid)
50332+ break;
50333+ if (effective == curgid)
50334+ break;
50335+ if (fs == curgid)
50336+ break;
50337+ }
50338+ /* not in deny list */
50339+ if (i == num) {
50340+ realok = 1;
50341+ effectiveok = 1;
50342+ fsok = 1;
50343+ }
50344+ }
50345+
50346+ if (realok && effectiveok && fsok)
50347+ return 0;
50348+ else {
50349+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50350+ return 1;
50351+ }
50352+}
50353+
50354+void
50355+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50356+{
50357+ struct acl_role_label *role = task->role;
50358+ struct acl_subject_label *subj = NULL;
50359+ struct acl_object_label *obj;
50360+ struct file *filp;
50361+
50362+ if (unlikely(!(gr_status & GR_READY)))
50363+ return;
50364+
50365+ filp = task->exec_file;
50366+
50367+ /* kernel process, we'll give them the kernel role */
50368+ if (unlikely(!filp)) {
50369+ task->role = kernel_role;
50370+ task->acl = kernel_role->root_label;
50371+ return;
50372+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50373+ role = lookup_acl_role_label(task, uid, gid);
50374+
50375+ /* perform subject lookup in possibly new role
50376+ we can use this result below in the case where role == task->role
50377+ */
50378+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50379+
50380+ /* if we changed uid/gid, but result in the same role
50381+ and are using inheritance, don't lose the inherited subject
50382+ if current subject is other than what normal lookup
50383+ would result in, we arrived via inheritance, don't
50384+ lose subject
50385+ */
50386+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50387+ (subj == task->acl)))
50388+ task->acl = subj;
50389+
50390+ task->role = role;
50391+
50392+ task->is_writable = 0;
50393+
50394+ /* ignore additional mmap checks for processes that are writable
50395+ by the default ACL */
50396+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50397+ if (unlikely(obj->mode & GR_WRITE))
50398+ task->is_writable = 1;
50399+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50400+ if (unlikely(obj->mode & GR_WRITE))
50401+ task->is_writable = 1;
50402+
50403+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50404+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50405+#endif
50406+
50407+ gr_set_proc_res(task);
50408+
50409+ return;
50410+}
50411+
50412+int
50413+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50414+ const int unsafe_share)
50415+{
50416+ struct task_struct *task = current;
50417+ struct acl_subject_label *newacl;
50418+ struct acl_object_label *obj;
50419+ __u32 retmode;
50420+
50421+ if (unlikely(!(gr_status & GR_READY)))
50422+ return 0;
50423+
50424+ newacl = chk_subj_label(dentry, mnt, task->role);
50425+
50426+ task_lock(task);
50427+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
50428+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50429+ !(task->role->roletype & GR_ROLE_GOD) &&
50430+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50431+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
50432+ task_unlock(task);
50433+ if (unsafe_share)
50434+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50435+ else
50436+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50437+ return -EACCES;
50438+ }
50439+ task_unlock(task);
50440+
50441+ obj = chk_obj_label(dentry, mnt, task->acl);
50442+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50443+
50444+ if (!(task->acl->mode & GR_INHERITLEARN) &&
50445+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50446+ if (obj->nested)
50447+ task->acl = obj->nested;
50448+ else
50449+ task->acl = newacl;
50450+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50451+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50452+
50453+ task->is_writable = 0;
50454+
50455+ /* ignore additional mmap checks for processes that are writable
50456+ by the default ACL */
50457+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
50458+ if (unlikely(obj->mode & GR_WRITE))
50459+ task->is_writable = 1;
50460+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
50461+ if (unlikely(obj->mode & GR_WRITE))
50462+ task->is_writable = 1;
50463+
50464+ gr_set_proc_res(task);
50465+
50466+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50467+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50468+#endif
50469+ return 0;
50470+}
50471+
50472+/* always called with valid inodev ptr */
50473+static void
50474+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50475+{
50476+ struct acl_object_label *matchpo;
50477+ struct acl_subject_label *matchps;
50478+ struct acl_subject_label *subj;
50479+ struct acl_role_label *role;
50480+ unsigned int x;
50481+
50482+ FOR_EACH_ROLE_START(role)
50483+ FOR_EACH_SUBJECT_START(role, subj, x)
50484+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50485+ matchpo->mode |= GR_DELETED;
50486+ FOR_EACH_SUBJECT_END(subj,x)
50487+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50488+ if (subj->inode == ino && subj->device == dev)
50489+ subj->mode |= GR_DELETED;
50490+ FOR_EACH_NESTED_SUBJECT_END(subj)
50491+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50492+ matchps->mode |= GR_DELETED;
50493+ FOR_EACH_ROLE_END(role)
50494+
50495+ inodev->nentry->deleted = 1;
50496+
50497+ return;
50498+}
50499+
50500+void
50501+gr_handle_delete(const ino_t ino, const dev_t dev)
50502+{
50503+ struct inodev_entry *inodev;
50504+
50505+ if (unlikely(!(gr_status & GR_READY)))
50506+ return;
50507+
50508+ write_lock(&gr_inode_lock);
50509+ inodev = lookup_inodev_entry(ino, dev);
50510+ if (inodev != NULL)
50511+ do_handle_delete(inodev, ino, dev);
50512+ write_unlock(&gr_inode_lock);
50513+
50514+ return;
50515+}
50516+
50517+static void
50518+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50519+ const ino_t newinode, const dev_t newdevice,
50520+ struct acl_subject_label *subj)
50521+{
50522+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50523+ struct acl_object_label *match;
50524+
50525+ match = subj->obj_hash[index];
50526+
50527+ while (match && (match->inode != oldinode ||
50528+ match->device != olddevice ||
50529+ !(match->mode & GR_DELETED)))
50530+ match = match->next;
50531+
50532+ if (match && (match->inode == oldinode)
50533+ && (match->device == olddevice)
50534+ && (match->mode & GR_DELETED)) {
50535+ if (match->prev == NULL) {
50536+ subj->obj_hash[index] = match->next;
50537+ if (match->next != NULL)
50538+ match->next->prev = NULL;
50539+ } else {
50540+ match->prev->next = match->next;
50541+ if (match->next != NULL)
50542+ match->next->prev = match->prev;
50543+ }
50544+ match->prev = NULL;
50545+ match->next = NULL;
50546+ match->inode = newinode;
50547+ match->device = newdevice;
50548+ match->mode &= ~GR_DELETED;
50549+
50550+ insert_acl_obj_label(match, subj);
50551+ }
50552+
50553+ return;
50554+}
50555+
50556+static void
50557+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50558+ const ino_t newinode, const dev_t newdevice,
50559+ struct acl_role_label *role)
50560+{
50561+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50562+ struct acl_subject_label *match;
50563+
50564+ match = role->subj_hash[index];
50565+
50566+ while (match && (match->inode != oldinode ||
50567+ match->device != olddevice ||
50568+ !(match->mode & GR_DELETED)))
50569+ match = match->next;
50570+
50571+ if (match && (match->inode == oldinode)
50572+ && (match->device == olddevice)
50573+ && (match->mode & GR_DELETED)) {
50574+ if (match->prev == NULL) {
50575+ role->subj_hash[index] = match->next;
50576+ if (match->next != NULL)
50577+ match->next->prev = NULL;
50578+ } else {
50579+ match->prev->next = match->next;
50580+ if (match->next != NULL)
50581+ match->next->prev = match->prev;
50582+ }
50583+ match->prev = NULL;
50584+ match->next = NULL;
50585+ match->inode = newinode;
50586+ match->device = newdevice;
50587+ match->mode &= ~GR_DELETED;
50588+
50589+ insert_acl_subj_label(match, role);
50590+ }
50591+
50592+ return;
50593+}
50594+
50595+static void
50596+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50597+ const ino_t newinode, const dev_t newdevice)
50598+{
50599+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50600+ struct inodev_entry *match;
50601+
50602+ match = inodev_set.i_hash[index];
50603+
50604+ while (match && (match->nentry->inode != oldinode ||
50605+ match->nentry->device != olddevice || !match->nentry->deleted))
50606+ match = match->next;
50607+
50608+ if (match && (match->nentry->inode == oldinode)
50609+ && (match->nentry->device == olddevice) &&
50610+ match->nentry->deleted) {
50611+ if (match->prev == NULL) {
50612+ inodev_set.i_hash[index] = match->next;
50613+ if (match->next != NULL)
50614+ match->next->prev = NULL;
50615+ } else {
50616+ match->prev->next = match->next;
50617+ if (match->next != NULL)
50618+ match->next->prev = match->prev;
50619+ }
50620+ match->prev = NULL;
50621+ match->next = NULL;
50622+ match->nentry->inode = newinode;
50623+ match->nentry->device = newdevice;
50624+ match->nentry->deleted = 0;
50625+
50626+ insert_inodev_entry(match);
50627+ }
50628+
50629+ return;
50630+}
50631+
50632+static void
50633+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50634+{
50635+ struct acl_subject_label *subj;
50636+ struct acl_role_label *role;
50637+ unsigned int x;
50638+
50639+ FOR_EACH_ROLE_START(role)
50640+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50641+
50642+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50643+ if ((subj->inode == ino) && (subj->device == dev)) {
50644+ subj->inode = ino;
50645+ subj->device = dev;
50646+ }
50647+ FOR_EACH_NESTED_SUBJECT_END(subj)
50648+ FOR_EACH_SUBJECT_START(role, subj, x)
50649+ update_acl_obj_label(matchn->inode, matchn->device,
50650+ ino, dev, subj);
50651+ FOR_EACH_SUBJECT_END(subj,x)
50652+ FOR_EACH_ROLE_END(role)
50653+
50654+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50655+
50656+ return;
50657+}
50658+
50659+static void
50660+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50661+ const struct vfsmount *mnt)
50662+{
50663+ ino_t ino = dentry->d_inode->i_ino;
50664+ dev_t dev = __get_dev(dentry);
50665+
50666+ __do_handle_create(matchn, ino, dev);
50667+
50668+ return;
50669+}
50670+
50671+void
50672+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50673+{
50674+ struct name_entry *matchn;
50675+
50676+ if (unlikely(!(gr_status & GR_READY)))
50677+ return;
50678+
50679+ preempt_disable();
50680+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50681+
50682+ if (unlikely((unsigned long)matchn)) {
50683+ write_lock(&gr_inode_lock);
50684+ do_handle_create(matchn, dentry, mnt);
50685+ write_unlock(&gr_inode_lock);
50686+ }
50687+ preempt_enable();
50688+
50689+ return;
50690+}
50691+
50692+void
50693+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50694+{
50695+ struct name_entry *matchn;
50696+
50697+ if (unlikely(!(gr_status & GR_READY)))
50698+ return;
50699+
50700+ preempt_disable();
50701+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50702+
50703+ if (unlikely((unsigned long)matchn)) {
50704+ write_lock(&gr_inode_lock);
50705+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50706+ write_unlock(&gr_inode_lock);
50707+ }
50708+ preempt_enable();
50709+
50710+ return;
50711+}
50712+
50713+void
50714+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50715+ struct dentry *old_dentry,
50716+ struct dentry *new_dentry,
50717+ struct vfsmount *mnt, const __u8 replace)
50718+{
50719+ struct name_entry *matchn;
50720+ struct inodev_entry *inodev;
50721+ struct inode *inode = new_dentry->d_inode;
50722+ ino_t old_ino = old_dentry->d_inode->i_ino;
50723+ dev_t old_dev = __get_dev(old_dentry);
50724+
50725+ /* vfs_rename swaps the name and parent link for old_dentry and
50726+ new_dentry
50727+ at this point, old_dentry has the new name, parent link, and inode
50728+ for the renamed file
50729+ if a file is being replaced by a rename, new_dentry has the inode
50730+ and name for the replaced file
50731+ */
50732+
50733+ if (unlikely(!(gr_status & GR_READY)))
50734+ return;
50735+
50736+ preempt_disable();
50737+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50738+
50739+ /* we wouldn't have to check d_inode if it weren't for
50740+ NFS silly-renaming
50741+ */
50742+
50743+ write_lock(&gr_inode_lock);
50744+ if (unlikely(replace && inode)) {
50745+ ino_t new_ino = inode->i_ino;
50746+ dev_t new_dev = __get_dev(new_dentry);
50747+
50748+ inodev = lookup_inodev_entry(new_ino, new_dev);
50749+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
50750+ do_handle_delete(inodev, new_ino, new_dev);
50751+ }
50752+
50753+ inodev = lookup_inodev_entry(old_ino, old_dev);
50754+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
50755+ do_handle_delete(inodev, old_ino, old_dev);
50756+
50757+ if (unlikely((unsigned long)matchn))
50758+ do_handle_create(matchn, old_dentry, mnt);
50759+
50760+ write_unlock(&gr_inode_lock);
50761+ preempt_enable();
50762+
50763+ return;
50764+}
50765+
50766+static int
50767+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50768+ unsigned char **sum)
50769+{
50770+ struct acl_role_label *r;
50771+ struct role_allowed_ip *ipp;
50772+ struct role_transition *trans;
50773+ unsigned int i;
50774+ int found = 0;
50775+ u32 curr_ip = current->signal->curr_ip;
50776+
50777+ current->signal->saved_ip = curr_ip;
50778+
50779+ /* check transition table */
50780+
50781+ for (trans = current->role->transitions; trans; trans = trans->next) {
50782+ if (!strcmp(rolename, trans->rolename)) {
50783+ found = 1;
50784+ break;
50785+ }
50786+ }
50787+
50788+ if (!found)
50789+ return 0;
50790+
50791+ /* handle special roles that do not require authentication
50792+ and check ip */
50793+
50794+ FOR_EACH_ROLE_START(r)
50795+ if (!strcmp(rolename, r->rolename) &&
50796+ (r->roletype & GR_ROLE_SPECIAL)) {
50797+ found = 0;
50798+ if (r->allowed_ips != NULL) {
50799+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50800+ if ((ntohl(curr_ip) & ipp->netmask) ==
50801+ (ntohl(ipp->addr) & ipp->netmask))
50802+ found = 1;
50803+ }
50804+ } else
50805+ found = 2;
50806+ if (!found)
50807+ return 0;
50808+
50809+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50810+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50811+ *salt = NULL;
50812+ *sum = NULL;
50813+ return 1;
50814+ }
50815+ }
50816+ FOR_EACH_ROLE_END(r)
50817+
50818+ for (i = 0; i < num_sprole_pws; i++) {
50819+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50820+ *salt = acl_special_roles[i]->salt;
50821+ *sum = acl_special_roles[i]->sum;
50822+ return 1;
50823+ }
50824+ }
50825+
50826+ return 0;
50827+}
50828+
50829+static void
50830+assign_special_role(char *rolename)
50831+{
50832+ struct acl_object_label *obj;
50833+ struct acl_role_label *r;
50834+ struct acl_role_label *assigned = NULL;
50835+ struct task_struct *tsk;
50836+ struct file *filp;
50837+
50838+ FOR_EACH_ROLE_START(r)
50839+ if (!strcmp(rolename, r->rolename) &&
50840+ (r->roletype & GR_ROLE_SPECIAL)) {
50841+ assigned = r;
50842+ break;
50843+ }
50844+ FOR_EACH_ROLE_END(r)
50845+
50846+ if (!assigned)
50847+ return;
50848+
50849+ read_lock(&tasklist_lock);
50850+ read_lock(&grsec_exec_file_lock);
50851+
50852+ tsk = current->real_parent;
50853+ if (tsk == NULL)
50854+ goto out_unlock;
50855+
50856+ filp = tsk->exec_file;
50857+ if (filp == NULL)
50858+ goto out_unlock;
50859+
50860+ tsk->is_writable = 0;
50861+
50862+ tsk->acl_sp_role = 1;
50863+ tsk->acl_role_id = ++acl_sp_role_value;
50864+ tsk->role = assigned;
50865+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50866+
50867+ /* ignore additional mmap checks for processes that are writable
50868+ by the default ACL */
50869+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50870+ if (unlikely(obj->mode & GR_WRITE))
50871+ tsk->is_writable = 1;
50872+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50873+ if (unlikely(obj->mode & GR_WRITE))
50874+ tsk->is_writable = 1;
50875+
50876+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50877+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50878+#endif
50879+
50880+out_unlock:
50881+ read_unlock(&grsec_exec_file_lock);
50882+ read_unlock(&tasklist_lock);
50883+ return;
50884+}
50885+
50886+int gr_check_secure_terminal(struct task_struct *task)
50887+{
50888+ struct task_struct *p, *p2, *p3;
50889+ struct files_struct *files;
50890+ struct fdtable *fdt;
50891+ struct file *our_file = NULL, *file;
50892+ int i;
50893+
50894+ if (task->signal->tty == NULL)
50895+ return 1;
50896+
50897+ files = get_files_struct(task);
50898+ if (files != NULL) {
50899+ rcu_read_lock();
50900+ fdt = files_fdtable(files);
50901+ for (i=0; i < fdt->max_fds; i++) {
50902+ file = fcheck_files(files, i);
50903+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50904+ get_file(file);
50905+ our_file = file;
50906+ }
50907+ }
50908+ rcu_read_unlock();
50909+ put_files_struct(files);
50910+ }
50911+
50912+ if (our_file == NULL)
50913+ return 1;
50914+
50915+ read_lock(&tasklist_lock);
50916+ do_each_thread(p2, p) {
50917+ files = get_files_struct(p);
50918+ if (files == NULL ||
50919+ (p->signal && p->signal->tty == task->signal->tty)) {
50920+ if (files != NULL)
50921+ put_files_struct(files);
50922+ continue;
50923+ }
50924+ rcu_read_lock();
50925+ fdt = files_fdtable(files);
50926+ for (i=0; i < fdt->max_fds; i++) {
50927+ file = fcheck_files(files, i);
50928+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50929+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50930+ p3 = task;
50931+ while (p3->pid > 0) {
50932+ if (p3 == p)
50933+ break;
50934+ p3 = p3->real_parent;
50935+ }
50936+ if (p3 == p)
50937+ break;
50938+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50939+ gr_handle_alertkill(p);
50940+ rcu_read_unlock();
50941+ put_files_struct(files);
50942+ read_unlock(&tasklist_lock);
50943+ fput(our_file);
50944+ return 0;
50945+ }
50946+ }
50947+ rcu_read_unlock();
50948+ put_files_struct(files);
50949+ } while_each_thread(p2, p);
50950+ read_unlock(&tasklist_lock);
50951+
50952+ fput(our_file);
50953+ return 1;
50954+}
50955+
50956+ssize_t
50957+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50958+{
50959+ struct gr_arg_wrapper uwrap;
50960+ unsigned char *sprole_salt = NULL;
50961+ unsigned char *sprole_sum = NULL;
50962+ int error = sizeof (struct gr_arg_wrapper);
50963+ int error2 = 0;
50964+
50965+ mutex_lock(&gr_dev_mutex);
50966+
50967+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50968+ error = -EPERM;
50969+ goto out;
50970+ }
50971+
50972+ if (count != sizeof (struct gr_arg_wrapper)) {
50973+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50974+ error = -EINVAL;
50975+ goto out;
50976+ }
50977+
50978+
50979+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50980+ gr_auth_expires = 0;
50981+ gr_auth_attempts = 0;
50982+ }
50983+
50984+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50985+ error = -EFAULT;
50986+ goto out;
50987+ }
50988+
50989+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50990+ error = -EINVAL;
50991+ goto out;
50992+ }
50993+
50994+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50995+ error = -EFAULT;
50996+ goto out;
50997+ }
50998+
50999+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51000+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51001+ time_after(gr_auth_expires, get_seconds())) {
51002+ error = -EBUSY;
51003+ goto out;
51004+ }
51005+
51006+ /* if non-root trying to do anything other than use a special role,
51007+ do not attempt authentication, do not count towards authentication
51008+ locking
51009+ */
51010+
51011+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51012+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51013+ current_uid()) {
51014+ error = -EPERM;
51015+ goto out;
51016+ }
51017+
51018+ /* ensure pw and special role name are null terminated */
51019+
51020+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51021+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51022+
51023+ /* Okay.
51024+ * We have our enough of the argument structure..(we have yet
51025+ * to copy_from_user the tables themselves) . Copy the tables
51026+ * only if we need them, i.e. for loading operations. */
51027+
51028+ switch (gr_usermode->mode) {
51029+ case GR_STATUS:
51030+ if (gr_status & GR_READY) {
51031+ error = 1;
51032+ if (!gr_check_secure_terminal(current))
51033+ error = 3;
51034+ } else
51035+ error = 2;
51036+ goto out;
51037+ case GR_SHUTDOWN:
51038+ if ((gr_status & GR_READY)
51039+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51040+ pax_open_kernel();
51041+ gr_status &= ~GR_READY;
51042+ pax_close_kernel();
51043+
51044+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51045+ free_variables();
51046+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51047+ memset(gr_system_salt, 0, GR_SALT_LEN);
51048+ memset(gr_system_sum, 0, GR_SHA_LEN);
51049+ } else if (gr_status & GR_READY) {
51050+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51051+ error = -EPERM;
51052+ } else {
51053+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51054+ error = -EAGAIN;
51055+ }
51056+ break;
51057+ case GR_ENABLE:
51058+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51059+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51060+ else {
51061+ if (gr_status & GR_READY)
51062+ error = -EAGAIN;
51063+ else
51064+ error = error2;
51065+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51066+ }
51067+ break;
51068+ case GR_RELOAD:
51069+ if (!(gr_status & GR_READY)) {
51070+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51071+ error = -EAGAIN;
51072+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51073+ preempt_disable();
51074+
51075+ pax_open_kernel();
51076+ gr_status &= ~GR_READY;
51077+ pax_close_kernel();
51078+
51079+ free_variables();
51080+ if (!(error2 = gracl_init(gr_usermode))) {
51081+ preempt_enable();
51082+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51083+ } else {
51084+ preempt_enable();
51085+ error = error2;
51086+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51087+ }
51088+ } else {
51089+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51090+ error = -EPERM;
51091+ }
51092+ break;
51093+ case GR_SEGVMOD:
51094+ if (unlikely(!(gr_status & GR_READY))) {
51095+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51096+ error = -EAGAIN;
51097+ break;
51098+ }
51099+
51100+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51101+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51102+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51103+ struct acl_subject_label *segvacl;
51104+ segvacl =
51105+ lookup_acl_subj_label(gr_usermode->segv_inode,
51106+ gr_usermode->segv_device,
51107+ current->role);
51108+ if (segvacl) {
51109+ segvacl->crashes = 0;
51110+ segvacl->expires = 0;
51111+ }
51112+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51113+ gr_remove_uid(gr_usermode->segv_uid);
51114+ }
51115+ } else {
51116+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51117+ error = -EPERM;
51118+ }
51119+ break;
51120+ case GR_SPROLE:
51121+ case GR_SPROLEPAM:
51122+ if (unlikely(!(gr_status & GR_READY))) {
51123+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51124+ error = -EAGAIN;
51125+ break;
51126+ }
51127+
51128+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51129+ current->role->expires = 0;
51130+ current->role->auth_attempts = 0;
51131+ }
51132+
51133+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51134+ time_after(current->role->expires, get_seconds())) {
51135+ error = -EBUSY;
51136+ goto out;
51137+ }
51138+
51139+ if (lookup_special_role_auth
51140+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51141+ && ((!sprole_salt && !sprole_sum)
51142+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51143+ char *p = "";
51144+ assign_special_role(gr_usermode->sp_role);
51145+ read_lock(&tasklist_lock);
51146+ if (current->real_parent)
51147+ p = current->real_parent->role->rolename;
51148+ read_unlock(&tasklist_lock);
51149+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51150+ p, acl_sp_role_value);
51151+ } else {
51152+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51153+ error = -EPERM;
51154+ if(!(current->role->auth_attempts++))
51155+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51156+
51157+ goto out;
51158+ }
51159+ break;
51160+ case GR_UNSPROLE:
51161+ if (unlikely(!(gr_status & GR_READY))) {
51162+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51163+ error = -EAGAIN;
51164+ break;
51165+ }
51166+
51167+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51168+ char *p = "";
51169+ int i = 0;
51170+
51171+ read_lock(&tasklist_lock);
51172+ if (current->real_parent) {
51173+ p = current->real_parent->role->rolename;
51174+ i = current->real_parent->acl_role_id;
51175+ }
51176+ read_unlock(&tasklist_lock);
51177+
51178+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51179+ gr_set_acls(1);
51180+ } else {
51181+ error = -EPERM;
51182+ goto out;
51183+ }
51184+ break;
51185+ default:
51186+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51187+ error = -EINVAL;
51188+ break;
51189+ }
51190+
51191+ if (error != -EPERM)
51192+ goto out;
51193+
51194+ if(!(gr_auth_attempts++))
51195+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51196+
51197+ out:
51198+ mutex_unlock(&gr_dev_mutex);
51199+ return error;
51200+}
51201+
51202+/* must be called with
51203+ rcu_read_lock();
51204+ read_lock(&tasklist_lock);
51205+ read_lock(&grsec_exec_file_lock);
51206+*/
51207+int gr_apply_subject_to_task(struct task_struct *task)
51208+{
51209+ struct acl_object_label *obj;
51210+ char *tmpname;
51211+ struct acl_subject_label *tmpsubj;
51212+ struct file *filp;
51213+ struct name_entry *nmatch;
51214+
51215+ filp = task->exec_file;
51216+ if (filp == NULL)
51217+ return 0;
51218+
51219+ /* the following is to apply the correct subject
51220+ on binaries running when the RBAC system
51221+ is enabled, when the binaries have been
51222+ replaced or deleted since their execution
51223+ -----
51224+ when the RBAC system starts, the inode/dev
51225+ from exec_file will be one the RBAC system
51226+ is unaware of. It only knows the inode/dev
51227+ of the present file on disk, or the absence
51228+ of it.
51229+ */
51230+ preempt_disable();
51231+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51232+
51233+ nmatch = lookup_name_entry(tmpname);
51234+ preempt_enable();
51235+ tmpsubj = NULL;
51236+ if (nmatch) {
51237+ if (nmatch->deleted)
51238+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51239+ else
51240+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51241+ if (tmpsubj != NULL)
51242+ task->acl = tmpsubj;
51243+ }
51244+ if (tmpsubj == NULL)
51245+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51246+ task->role);
51247+ if (task->acl) {
51248+ task->is_writable = 0;
51249+ /* ignore additional mmap checks for processes that are writable
51250+ by the default ACL */
51251+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51252+ if (unlikely(obj->mode & GR_WRITE))
51253+ task->is_writable = 1;
51254+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51255+ if (unlikely(obj->mode & GR_WRITE))
51256+ task->is_writable = 1;
51257+
51258+ gr_set_proc_res(task);
51259+
51260+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51261+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51262+#endif
51263+ } else {
51264+ return 1;
51265+ }
51266+
51267+ return 0;
51268+}
51269+
51270+int
51271+gr_set_acls(const int type)
51272+{
51273+ struct task_struct *task, *task2;
51274+ struct acl_role_label *role = current->role;
51275+ __u16 acl_role_id = current->acl_role_id;
51276+ const struct cred *cred;
51277+ int ret;
51278+
51279+ rcu_read_lock();
51280+ read_lock(&tasklist_lock);
51281+ read_lock(&grsec_exec_file_lock);
51282+ do_each_thread(task2, task) {
51283+ /* check to see if we're called from the exit handler,
51284+ if so, only replace ACLs that have inherited the admin
51285+ ACL */
51286+
51287+ if (type && (task->role != role ||
51288+ task->acl_role_id != acl_role_id))
51289+ continue;
51290+
51291+ task->acl_role_id = 0;
51292+ task->acl_sp_role = 0;
51293+
51294+ if (task->exec_file) {
51295+ cred = __task_cred(task);
51296+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51297+ ret = gr_apply_subject_to_task(task);
51298+ if (ret) {
51299+ read_unlock(&grsec_exec_file_lock);
51300+ read_unlock(&tasklist_lock);
51301+ rcu_read_unlock();
51302+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51303+ return ret;
51304+ }
51305+ } else {
51306+ // it's a kernel process
51307+ task->role = kernel_role;
51308+ task->acl = kernel_role->root_label;
51309+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51310+ task->acl->mode &= ~GR_PROCFIND;
51311+#endif
51312+ }
51313+ } while_each_thread(task2, task);
51314+ read_unlock(&grsec_exec_file_lock);
51315+ read_unlock(&tasklist_lock);
51316+ rcu_read_unlock();
51317+
51318+ return 0;
51319+}
51320+
51321+void
51322+gr_learn_resource(const struct task_struct *task,
51323+ const int res, const unsigned long wanted, const int gt)
51324+{
51325+ struct acl_subject_label *acl;
51326+ const struct cred *cred;
51327+
51328+ if (unlikely((gr_status & GR_READY) &&
51329+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51330+ goto skip_reslog;
51331+
51332+#ifdef CONFIG_GRKERNSEC_RESLOG
51333+ gr_log_resource(task, res, wanted, gt);
51334+#endif
51335+ skip_reslog:
51336+
51337+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51338+ return;
51339+
51340+ acl = task->acl;
51341+
51342+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51343+ !(acl->resmask & (1 << (unsigned short) res))))
51344+ return;
51345+
51346+ if (wanted >= acl->res[res].rlim_cur) {
51347+ unsigned long res_add;
51348+
51349+ res_add = wanted;
51350+ switch (res) {
51351+ case RLIMIT_CPU:
51352+ res_add += GR_RLIM_CPU_BUMP;
51353+ break;
51354+ case RLIMIT_FSIZE:
51355+ res_add += GR_RLIM_FSIZE_BUMP;
51356+ break;
51357+ case RLIMIT_DATA:
51358+ res_add += GR_RLIM_DATA_BUMP;
51359+ break;
51360+ case RLIMIT_STACK:
51361+ res_add += GR_RLIM_STACK_BUMP;
51362+ break;
51363+ case RLIMIT_CORE:
51364+ res_add += GR_RLIM_CORE_BUMP;
51365+ break;
51366+ case RLIMIT_RSS:
51367+ res_add += GR_RLIM_RSS_BUMP;
51368+ break;
51369+ case RLIMIT_NPROC:
51370+ res_add += GR_RLIM_NPROC_BUMP;
51371+ break;
51372+ case RLIMIT_NOFILE:
51373+ res_add += GR_RLIM_NOFILE_BUMP;
51374+ break;
51375+ case RLIMIT_MEMLOCK:
51376+ res_add += GR_RLIM_MEMLOCK_BUMP;
51377+ break;
51378+ case RLIMIT_AS:
51379+ res_add += GR_RLIM_AS_BUMP;
51380+ break;
51381+ case RLIMIT_LOCKS:
51382+ res_add += GR_RLIM_LOCKS_BUMP;
51383+ break;
51384+ case RLIMIT_SIGPENDING:
51385+ res_add += GR_RLIM_SIGPENDING_BUMP;
51386+ break;
51387+ case RLIMIT_MSGQUEUE:
51388+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51389+ break;
51390+ case RLIMIT_NICE:
51391+ res_add += GR_RLIM_NICE_BUMP;
51392+ break;
51393+ case RLIMIT_RTPRIO:
51394+ res_add += GR_RLIM_RTPRIO_BUMP;
51395+ break;
51396+ case RLIMIT_RTTIME:
51397+ res_add += GR_RLIM_RTTIME_BUMP;
51398+ break;
51399+ }
51400+
51401+ acl->res[res].rlim_cur = res_add;
51402+
51403+ if (wanted > acl->res[res].rlim_max)
51404+ acl->res[res].rlim_max = res_add;
51405+
51406+ /* only log the subject filename, since resource logging is supported for
51407+ single-subject learning only */
51408+ rcu_read_lock();
51409+ cred = __task_cred(task);
51410+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51411+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51412+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51413+ "", (unsigned long) res, &task->signal->saved_ip);
51414+ rcu_read_unlock();
51415+ }
51416+
51417+ return;
51418+}
51419+
51420+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51421+void
51422+pax_set_initial_flags(struct linux_binprm *bprm)
51423+{
51424+ struct task_struct *task = current;
51425+ struct acl_subject_label *proc;
51426+ unsigned long flags;
51427+
51428+ if (unlikely(!(gr_status & GR_READY)))
51429+ return;
51430+
51431+ flags = pax_get_flags(task);
51432+
51433+ proc = task->acl;
51434+
51435+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51436+ flags &= ~MF_PAX_PAGEEXEC;
51437+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51438+ flags &= ~MF_PAX_SEGMEXEC;
51439+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51440+ flags &= ~MF_PAX_RANDMMAP;
51441+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51442+ flags &= ~MF_PAX_EMUTRAMP;
51443+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51444+ flags &= ~MF_PAX_MPROTECT;
51445+
51446+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51447+ flags |= MF_PAX_PAGEEXEC;
51448+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51449+ flags |= MF_PAX_SEGMEXEC;
51450+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51451+ flags |= MF_PAX_RANDMMAP;
51452+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51453+ flags |= MF_PAX_EMUTRAMP;
51454+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51455+ flags |= MF_PAX_MPROTECT;
51456+
51457+ pax_set_flags(task, flags);
51458+
51459+ return;
51460+}
51461+#endif
51462+
51463+#ifdef CONFIG_SYSCTL
51464+/* Eric Biederman likes breaking userland ABI and every inode-based security
51465+ system to save 35kb of memory */
51466+
51467+/* we modify the passed in filename, but adjust it back before returning */
51468+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51469+{
51470+ struct name_entry *nmatch;
51471+ char *p, *lastp = NULL;
51472+ struct acl_object_label *obj = NULL, *tmp;
51473+ struct acl_subject_label *tmpsubj;
51474+ char c = '\0';
51475+
51476+ read_lock(&gr_inode_lock);
51477+
51478+ p = name + len - 1;
51479+ do {
51480+ nmatch = lookup_name_entry(name);
51481+ if (lastp != NULL)
51482+ *lastp = c;
51483+
51484+ if (nmatch == NULL)
51485+ goto next_component;
51486+ tmpsubj = current->acl;
51487+ do {
51488+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51489+ if (obj != NULL) {
51490+ tmp = obj->globbed;
51491+ while (tmp) {
51492+ if (!glob_match(tmp->filename, name)) {
51493+ obj = tmp;
51494+ goto found_obj;
51495+ }
51496+ tmp = tmp->next;
51497+ }
51498+ goto found_obj;
51499+ }
51500+ } while ((tmpsubj = tmpsubj->parent_subject));
51501+next_component:
51502+ /* end case */
51503+ if (p == name)
51504+ break;
51505+
51506+ while (*p != '/')
51507+ p--;
51508+ if (p == name)
51509+ lastp = p + 1;
51510+ else {
51511+ lastp = p;
51512+ p--;
51513+ }
51514+ c = *lastp;
51515+ *lastp = '\0';
51516+ } while (1);
51517+found_obj:
51518+ read_unlock(&gr_inode_lock);
51519+ /* obj returned will always be non-null */
51520+ return obj;
51521+}
51522+
51523+/* returns 0 when allowing, non-zero on error
51524+ op of 0 is used for readdir, so we don't log the names of hidden files
51525+*/
51526+__u32
51527+gr_handle_sysctl(const struct ctl_table *table, const int op)
51528+{
51529+ struct ctl_table *tmp;
51530+ const char *proc_sys = "/proc/sys";
51531+ char *path;
51532+ struct acl_object_label *obj;
51533+ unsigned short len = 0, pos = 0, depth = 0, i;
51534+ __u32 err = 0;
51535+ __u32 mode = 0;
51536+
51537+ if (unlikely(!(gr_status & GR_READY)))
51538+ return 0;
51539+
51540+ /* for now, ignore operations on non-sysctl entries if it's not a
51541+ readdir*/
51542+ if (table->child != NULL && op != 0)
51543+ return 0;
51544+
51545+ mode |= GR_FIND;
51546+ /* it's only a read if it's an entry, read on dirs is for readdir */
51547+ if (op & MAY_READ)
51548+ mode |= GR_READ;
51549+ if (op & MAY_WRITE)
51550+ mode |= GR_WRITE;
51551+
51552+ preempt_disable();
51553+
51554+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51555+
51556+ /* it's only a read/write if it's an actual entry, not a dir
51557+ (which are opened for readdir)
51558+ */
51559+
51560+ /* convert the requested sysctl entry into a pathname */
51561+
51562+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51563+ len += strlen(tmp->procname);
51564+ len++;
51565+ depth++;
51566+ }
51567+
51568+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51569+ /* deny */
51570+ goto out;
51571+ }
51572+
51573+ memset(path, 0, PAGE_SIZE);
51574+
51575+ memcpy(path, proc_sys, strlen(proc_sys));
51576+
51577+ pos += strlen(proc_sys);
51578+
51579+ for (; depth > 0; depth--) {
51580+ path[pos] = '/';
51581+ pos++;
51582+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51583+ if (depth == i) {
51584+ memcpy(path + pos, tmp->procname,
51585+ strlen(tmp->procname));
51586+ pos += strlen(tmp->procname);
51587+ }
51588+ i++;
51589+ }
51590+ }
51591+
51592+ obj = gr_lookup_by_name(path, pos);
51593+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51594+
51595+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51596+ ((err & mode) != mode))) {
51597+ __u32 new_mode = mode;
51598+
51599+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51600+
51601+ err = 0;
51602+ gr_log_learn_sysctl(path, new_mode);
51603+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51604+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51605+ err = -ENOENT;
51606+ } else if (!(err & GR_FIND)) {
51607+ err = -ENOENT;
51608+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51609+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51610+ path, (mode & GR_READ) ? " reading" : "",
51611+ (mode & GR_WRITE) ? " writing" : "");
51612+ err = -EACCES;
51613+ } else if ((err & mode) != mode) {
51614+ err = -EACCES;
51615+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51616+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51617+ path, (mode & GR_READ) ? " reading" : "",
51618+ (mode & GR_WRITE) ? " writing" : "");
51619+ err = 0;
51620+ } else
51621+ err = 0;
51622+
51623+ out:
51624+ preempt_enable();
51625+
51626+ return err;
51627+}
51628+#endif
51629+
51630+int
51631+gr_handle_proc_ptrace(struct task_struct *task)
51632+{
51633+ struct file *filp;
51634+ struct task_struct *tmp = task;
51635+ struct task_struct *curtemp = current;
51636+ __u32 retmode;
51637+
51638+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51639+ if (unlikely(!(gr_status & GR_READY)))
51640+ return 0;
51641+#endif
51642+
51643+ read_lock(&tasklist_lock);
51644+ read_lock(&grsec_exec_file_lock);
51645+ filp = task->exec_file;
51646+
51647+ while (tmp->pid > 0) {
51648+ if (tmp == curtemp)
51649+ break;
51650+ tmp = tmp->real_parent;
51651+ }
51652+
51653+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51654+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51655+ read_unlock(&grsec_exec_file_lock);
51656+ read_unlock(&tasklist_lock);
51657+ return 1;
51658+ }
51659+
51660+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51661+ if (!(gr_status & GR_READY)) {
51662+ read_unlock(&grsec_exec_file_lock);
51663+ read_unlock(&tasklist_lock);
51664+ return 0;
51665+ }
51666+#endif
51667+
51668+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51669+ read_unlock(&grsec_exec_file_lock);
51670+ read_unlock(&tasklist_lock);
51671+
51672+ if (retmode & GR_NOPTRACE)
51673+ return 1;
51674+
51675+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51676+ && (current->acl != task->acl || (current->acl != current->role->root_label
51677+ && current->pid != task->pid)))
51678+ return 1;
51679+
51680+ return 0;
51681+}
51682+
51683+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51684+{
51685+ if (unlikely(!(gr_status & GR_READY)))
51686+ return;
51687+
51688+ if (!(current->role->roletype & GR_ROLE_GOD))
51689+ return;
51690+
51691+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51692+ p->role->rolename, gr_task_roletype_to_char(p),
51693+ p->acl->filename);
51694+}
51695+
51696+int
51697+gr_handle_ptrace(struct task_struct *task, const long request)
51698+{
51699+ struct task_struct *tmp = task;
51700+ struct task_struct *curtemp = current;
51701+ __u32 retmode;
51702+
51703+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51704+ if (unlikely(!(gr_status & GR_READY)))
51705+ return 0;
51706+#endif
51707+
51708+ read_lock(&tasklist_lock);
51709+ while (tmp->pid > 0) {
51710+ if (tmp == curtemp)
51711+ break;
51712+ tmp = tmp->real_parent;
51713+ }
51714+
51715+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51716+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51717+ read_unlock(&tasklist_lock);
51718+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51719+ return 1;
51720+ }
51721+ read_unlock(&tasklist_lock);
51722+
51723+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51724+ if (!(gr_status & GR_READY))
51725+ return 0;
51726+#endif
51727+
51728+ read_lock(&grsec_exec_file_lock);
51729+ if (unlikely(!task->exec_file)) {
51730+ read_unlock(&grsec_exec_file_lock);
51731+ return 0;
51732+ }
51733+
51734+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51735+ read_unlock(&grsec_exec_file_lock);
51736+
51737+ if (retmode & GR_NOPTRACE) {
51738+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51739+ return 1;
51740+ }
51741+
51742+ if (retmode & GR_PTRACERD) {
51743+ switch (request) {
51744+ case PTRACE_SEIZE:
51745+ case PTRACE_POKETEXT:
51746+ case PTRACE_POKEDATA:
51747+ case PTRACE_POKEUSR:
51748+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51749+ case PTRACE_SETREGS:
51750+ case PTRACE_SETFPREGS:
51751+#endif
51752+#ifdef CONFIG_X86
51753+ case PTRACE_SETFPXREGS:
51754+#endif
51755+#ifdef CONFIG_ALTIVEC
51756+ case PTRACE_SETVRREGS:
51757+#endif
51758+ return 1;
51759+ default:
51760+ return 0;
51761+ }
51762+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51763+ !(current->role->roletype & GR_ROLE_GOD) &&
51764+ (current->acl != task->acl)) {
51765+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51766+ return 1;
51767+ }
51768+
51769+ return 0;
51770+}
51771+
51772+static int is_writable_mmap(const struct file *filp)
51773+{
51774+ struct task_struct *task = current;
51775+ struct acl_object_label *obj, *obj2;
51776+
51777+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51778+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51779+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51780+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51781+ task->role->root_label);
51782+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51783+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51784+ return 1;
51785+ }
51786+ }
51787+ return 0;
51788+}
51789+
51790+int
51791+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51792+{
51793+ __u32 mode;
51794+
51795+ if (unlikely(!file || !(prot & PROT_EXEC)))
51796+ return 1;
51797+
51798+ if (is_writable_mmap(file))
51799+ return 0;
51800+
51801+ mode =
51802+ gr_search_file(file->f_path.dentry,
51803+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51804+ file->f_path.mnt);
51805+
51806+ if (!gr_tpe_allow(file))
51807+ return 0;
51808+
51809+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51810+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51811+ return 0;
51812+ } else if (unlikely(!(mode & GR_EXEC))) {
51813+ return 0;
51814+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51815+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51816+ return 1;
51817+ }
51818+
51819+ return 1;
51820+}
51821+
51822+int
51823+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51824+{
51825+ __u32 mode;
51826+
51827+ if (unlikely(!file || !(prot & PROT_EXEC)))
51828+ return 1;
51829+
51830+ if (is_writable_mmap(file))
51831+ return 0;
51832+
51833+ mode =
51834+ gr_search_file(file->f_path.dentry,
51835+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51836+ file->f_path.mnt);
51837+
51838+ if (!gr_tpe_allow(file))
51839+ return 0;
51840+
51841+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51842+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51843+ return 0;
51844+ } else if (unlikely(!(mode & GR_EXEC))) {
51845+ return 0;
51846+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51847+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51848+ return 1;
51849+ }
51850+
51851+ return 1;
51852+}
51853+
51854+void
51855+gr_acl_handle_psacct(struct task_struct *task, const long code)
51856+{
51857+ unsigned long runtime;
51858+ unsigned long cputime;
51859+ unsigned int wday, cday;
51860+ __u8 whr, chr;
51861+ __u8 wmin, cmin;
51862+ __u8 wsec, csec;
51863+ struct timespec timeval;
51864+
51865+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51866+ !(task->acl->mode & GR_PROCACCT)))
51867+ return;
51868+
51869+ do_posix_clock_monotonic_gettime(&timeval);
51870+ runtime = timeval.tv_sec - task->start_time.tv_sec;
51871+ wday = runtime / (3600 * 24);
51872+ runtime -= wday * (3600 * 24);
51873+ whr = runtime / 3600;
51874+ runtime -= whr * 3600;
51875+ wmin = runtime / 60;
51876+ runtime -= wmin * 60;
51877+ wsec = runtime;
51878+
51879+ cputime = (task->utime + task->stime) / HZ;
51880+ cday = cputime / (3600 * 24);
51881+ cputime -= cday * (3600 * 24);
51882+ chr = cputime / 3600;
51883+ cputime -= chr * 3600;
51884+ cmin = cputime / 60;
51885+ cputime -= cmin * 60;
51886+ csec = cputime;
51887+
51888+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51889+
51890+ return;
51891+}
51892+
51893+void gr_set_kernel_label(struct task_struct *task)
51894+{
51895+ if (gr_status & GR_READY) {
51896+ task->role = kernel_role;
51897+ task->acl = kernel_role->root_label;
51898+ }
51899+ return;
51900+}
51901+
51902+#ifdef CONFIG_TASKSTATS
51903+int gr_is_taskstats_denied(int pid)
51904+{
51905+ struct task_struct *task;
51906+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51907+ const struct cred *cred;
51908+#endif
51909+ int ret = 0;
51910+
51911+ /* restrict taskstats viewing to un-chrooted root users
51912+ who have the 'view' subject flag if the RBAC system is enabled
51913+ */
51914+
51915+ rcu_read_lock();
51916+ read_lock(&tasklist_lock);
51917+ task = find_task_by_vpid(pid);
51918+ if (task) {
51919+#ifdef CONFIG_GRKERNSEC_CHROOT
51920+ if (proc_is_chrooted(task))
51921+ ret = -EACCES;
51922+#endif
51923+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51924+ cred = __task_cred(task);
51925+#ifdef CONFIG_GRKERNSEC_PROC_USER
51926+ if (cred->uid != 0)
51927+ ret = -EACCES;
51928+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51929+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51930+ ret = -EACCES;
51931+#endif
51932+#endif
51933+ if (gr_status & GR_READY) {
51934+ if (!(task->acl->mode & GR_VIEW))
51935+ ret = -EACCES;
51936+ }
51937+ } else
51938+ ret = -ENOENT;
51939+
51940+ read_unlock(&tasklist_lock);
51941+ rcu_read_unlock();
51942+
51943+ return ret;
51944+}
51945+#endif
51946+
51947+/* AUXV entries are filled via a descendant of search_binary_handler
51948+ after we've already applied the subject for the target
51949+*/
51950+int gr_acl_enable_at_secure(void)
51951+{
51952+ if (unlikely(!(gr_status & GR_READY)))
51953+ return 0;
51954+
51955+ if (current->acl->mode & GR_ATSECURE)
51956+ return 1;
51957+
51958+ return 0;
51959+}
51960+
51961+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51962+{
51963+ struct task_struct *task = current;
51964+ struct dentry *dentry = file->f_path.dentry;
51965+ struct vfsmount *mnt = file->f_path.mnt;
51966+ struct acl_object_label *obj, *tmp;
51967+ struct acl_subject_label *subj;
51968+ unsigned int bufsize;
51969+ int is_not_root;
51970+ char *path;
51971+ dev_t dev = __get_dev(dentry);
51972+
51973+ if (unlikely(!(gr_status & GR_READY)))
51974+ return 1;
51975+
51976+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51977+ return 1;
51978+
51979+ /* ignore Eric Biederman */
51980+ if (IS_PRIVATE(dentry->d_inode))
51981+ return 1;
51982+
51983+ subj = task->acl;
51984+ do {
51985+ obj = lookup_acl_obj_label(ino, dev, subj);
51986+ if (obj != NULL)
51987+ return (obj->mode & GR_FIND) ? 1 : 0;
51988+ } while ((subj = subj->parent_subject));
51989+
51990+ /* this is purely an optimization since we're looking for an object
51991+ for the directory we're doing a readdir on
51992+ if it's possible for any globbed object to match the entry we're
51993+ filling into the directory, then the object we find here will be
51994+ an anchor point with attached globbed objects
51995+ */
51996+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51997+ if (obj->globbed == NULL)
51998+ return (obj->mode & GR_FIND) ? 1 : 0;
51999+
52000+ is_not_root = ((obj->filename[0] == '/') &&
52001+ (obj->filename[1] == '\0')) ? 0 : 1;
52002+ bufsize = PAGE_SIZE - namelen - is_not_root;
52003+
52004+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52005+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52006+ return 1;
52007+
52008+ preempt_disable();
52009+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52010+ bufsize);
52011+
52012+ bufsize = strlen(path);
52013+
52014+ /* if base is "/", don't append an additional slash */
52015+ if (is_not_root)
52016+ *(path + bufsize) = '/';
52017+ memcpy(path + bufsize + is_not_root, name, namelen);
52018+ *(path + bufsize + namelen + is_not_root) = '\0';
52019+
52020+ tmp = obj->globbed;
52021+ while (tmp) {
52022+ if (!glob_match(tmp->filename, path)) {
52023+ preempt_enable();
52024+ return (tmp->mode & GR_FIND) ? 1 : 0;
52025+ }
52026+ tmp = tmp->next;
52027+ }
52028+ preempt_enable();
52029+ return (obj->mode & GR_FIND) ? 1 : 0;
52030+}
52031+
52032+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52033+EXPORT_SYMBOL(gr_acl_is_enabled);
52034+#endif
52035+EXPORT_SYMBOL(gr_learn_resource);
52036+EXPORT_SYMBOL(gr_set_kernel_label);
52037+#ifdef CONFIG_SECURITY
52038+EXPORT_SYMBOL(gr_check_user_change);
52039+EXPORT_SYMBOL(gr_check_group_change);
52040+#endif
52041+
52042diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52043new file mode 100644
52044index 0000000..34fefda
52045--- /dev/null
52046+++ b/grsecurity/gracl_alloc.c
52047@@ -0,0 +1,105 @@
52048+#include <linux/kernel.h>
52049+#include <linux/mm.h>
52050+#include <linux/slab.h>
52051+#include <linux/vmalloc.h>
52052+#include <linux/gracl.h>
52053+#include <linux/grsecurity.h>
52054+
52055+static unsigned long alloc_stack_next = 1;
52056+static unsigned long alloc_stack_size = 1;
52057+static void **alloc_stack;
52058+
52059+static __inline__ int
52060+alloc_pop(void)
52061+{
52062+ if (alloc_stack_next == 1)
52063+ return 0;
52064+
52065+ kfree(alloc_stack[alloc_stack_next - 2]);
52066+
52067+ alloc_stack_next--;
52068+
52069+ return 1;
52070+}
52071+
52072+static __inline__ int
52073+alloc_push(void *buf)
52074+{
52075+ if (alloc_stack_next >= alloc_stack_size)
52076+ return 1;
52077+
52078+ alloc_stack[alloc_stack_next - 1] = buf;
52079+
52080+ alloc_stack_next++;
52081+
52082+ return 0;
52083+}
52084+
52085+void *
52086+acl_alloc(unsigned long len)
52087+{
52088+ void *ret = NULL;
52089+
52090+ if (!len || len > PAGE_SIZE)
52091+ goto out;
52092+
52093+ ret = kmalloc(len, GFP_KERNEL);
52094+
52095+ if (ret) {
52096+ if (alloc_push(ret)) {
52097+ kfree(ret);
52098+ ret = NULL;
52099+ }
52100+ }
52101+
52102+out:
52103+ return ret;
52104+}
52105+
52106+void *
52107+acl_alloc_num(unsigned long num, unsigned long len)
52108+{
52109+ if (!len || (num > (PAGE_SIZE / len)))
52110+ return NULL;
52111+
52112+ return acl_alloc(num * len);
52113+}
52114+
52115+void
52116+acl_free_all(void)
52117+{
52118+ if (gr_acl_is_enabled() || !alloc_stack)
52119+ return;
52120+
52121+ while (alloc_pop()) ;
52122+
52123+ if (alloc_stack) {
52124+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52125+ kfree(alloc_stack);
52126+ else
52127+ vfree(alloc_stack);
52128+ }
52129+
52130+ alloc_stack = NULL;
52131+ alloc_stack_size = 1;
52132+ alloc_stack_next = 1;
52133+
52134+ return;
52135+}
52136+
52137+int
52138+acl_alloc_stack_init(unsigned long size)
52139+{
52140+ if ((size * sizeof (void *)) <= PAGE_SIZE)
52141+ alloc_stack =
52142+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52143+ else
52144+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
52145+
52146+ alloc_stack_size = size;
52147+
52148+ if (!alloc_stack)
52149+ return 0;
52150+ else
52151+ return 1;
52152+}
52153diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52154new file mode 100644
52155index 0000000..955ddfb
52156--- /dev/null
52157+++ b/grsecurity/gracl_cap.c
52158@@ -0,0 +1,101 @@
52159+#include <linux/kernel.h>
52160+#include <linux/module.h>
52161+#include <linux/sched.h>
52162+#include <linux/gracl.h>
52163+#include <linux/grsecurity.h>
52164+#include <linux/grinternal.h>
52165+
52166+extern const char *captab_log[];
52167+extern int captab_log_entries;
52168+
52169+int
52170+gr_acl_is_capable(const int cap)
52171+{
52172+ struct task_struct *task = current;
52173+ const struct cred *cred = current_cred();
52174+ struct acl_subject_label *curracl;
52175+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52176+ kernel_cap_t cap_audit = __cap_empty_set;
52177+
52178+ if (!gr_acl_is_enabled())
52179+ return 1;
52180+
52181+ curracl = task->acl;
52182+
52183+ cap_drop = curracl->cap_lower;
52184+ cap_mask = curracl->cap_mask;
52185+ cap_audit = curracl->cap_invert_audit;
52186+
52187+ while ((curracl = curracl->parent_subject)) {
52188+ /* if the cap isn't specified in the current computed mask but is specified in the
52189+ current level subject, and is lowered in the current level subject, then add
52190+ it to the set of dropped capabilities
52191+ otherwise, add the current level subject's mask to the current computed mask
52192+ */
52193+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52194+ cap_raise(cap_mask, cap);
52195+ if (cap_raised(curracl->cap_lower, cap))
52196+ cap_raise(cap_drop, cap);
52197+ if (cap_raised(curracl->cap_invert_audit, cap))
52198+ cap_raise(cap_audit, cap);
52199+ }
52200+ }
52201+
52202+ if (!cap_raised(cap_drop, cap)) {
52203+ if (cap_raised(cap_audit, cap))
52204+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52205+ return 1;
52206+ }
52207+
52208+ curracl = task->acl;
52209+
52210+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52211+ && cap_raised(cred->cap_effective, cap)) {
52212+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52213+ task->role->roletype, cred->uid,
52214+ cred->gid, task->exec_file ?
52215+ gr_to_filename(task->exec_file->f_path.dentry,
52216+ task->exec_file->f_path.mnt) : curracl->filename,
52217+ curracl->filename, 0UL,
52218+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52219+ return 1;
52220+ }
52221+
52222+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52223+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52224+ return 0;
52225+}
52226+
52227+int
52228+gr_acl_is_capable_nolog(const int cap)
52229+{
52230+ struct acl_subject_label *curracl;
52231+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52232+
52233+ if (!gr_acl_is_enabled())
52234+ return 1;
52235+
52236+ curracl = current->acl;
52237+
52238+ cap_drop = curracl->cap_lower;
52239+ cap_mask = curracl->cap_mask;
52240+
52241+ while ((curracl = curracl->parent_subject)) {
52242+ /* if the cap isn't specified in the current computed mask but is specified in the
52243+ current level subject, and is lowered in the current level subject, then add
52244+ it to the set of dropped capabilities
52245+ otherwise, add the current level subject's mask to the current computed mask
52246+ */
52247+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52248+ cap_raise(cap_mask, cap);
52249+ if (cap_raised(curracl->cap_lower, cap))
52250+ cap_raise(cap_drop, cap);
52251+ }
52252+ }
52253+
52254+ if (!cap_raised(cap_drop, cap))
52255+ return 1;
52256+
52257+ return 0;
52258+}
52259+
52260diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52261new file mode 100644
52262index 0000000..4eda5c3
52263--- /dev/null
52264+++ b/grsecurity/gracl_fs.c
52265@@ -0,0 +1,433 @@
52266+#include <linux/kernel.h>
52267+#include <linux/sched.h>
52268+#include <linux/types.h>
52269+#include <linux/fs.h>
52270+#include <linux/file.h>
52271+#include <linux/stat.h>
52272+#include <linux/grsecurity.h>
52273+#include <linux/grinternal.h>
52274+#include <linux/gracl.h>
52275+
52276+__u32
52277+gr_acl_handle_hidden_file(const struct dentry * dentry,
52278+ const struct vfsmount * mnt)
52279+{
52280+ __u32 mode;
52281+
52282+ if (unlikely(!dentry->d_inode))
52283+ return GR_FIND;
52284+
52285+ mode =
52286+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52287+
52288+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52289+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52290+ return mode;
52291+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52292+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52293+ return 0;
52294+ } else if (unlikely(!(mode & GR_FIND)))
52295+ return 0;
52296+
52297+ return GR_FIND;
52298+}
52299+
52300+__u32
52301+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52302+ int acc_mode)
52303+{
52304+ __u32 reqmode = GR_FIND;
52305+ __u32 mode;
52306+
52307+ if (unlikely(!dentry->d_inode))
52308+ return reqmode;
52309+
52310+ if (acc_mode & MAY_APPEND)
52311+ reqmode |= GR_APPEND;
52312+ else if (acc_mode & MAY_WRITE)
52313+ reqmode |= GR_WRITE;
52314+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52315+ reqmode |= GR_READ;
52316+
52317+ mode =
52318+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52319+ mnt);
52320+
52321+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52322+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52323+ reqmode & GR_READ ? " reading" : "",
52324+ reqmode & GR_WRITE ? " writing" : reqmode &
52325+ GR_APPEND ? " appending" : "");
52326+ return reqmode;
52327+ } else
52328+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52329+ {
52330+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52331+ reqmode & GR_READ ? " reading" : "",
52332+ reqmode & GR_WRITE ? " writing" : reqmode &
52333+ GR_APPEND ? " appending" : "");
52334+ return 0;
52335+ } else if (unlikely((mode & reqmode) != reqmode))
52336+ return 0;
52337+
52338+ return reqmode;
52339+}
52340+
52341+__u32
52342+gr_acl_handle_creat(const struct dentry * dentry,
52343+ const struct dentry * p_dentry,
52344+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52345+ const int imode)
52346+{
52347+ __u32 reqmode = GR_WRITE | GR_CREATE;
52348+ __u32 mode;
52349+
52350+ if (acc_mode & MAY_APPEND)
52351+ reqmode |= GR_APPEND;
52352+ // if a directory was required or the directory already exists, then
52353+ // don't count this open as a read
52354+ if ((acc_mode & MAY_READ) &&
52355+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52356+ reqmode |= GR_READ;
52357+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52358+ reqmode |= GR_SETID;
52359+
52360+ mode =
52361+ gr_check_create(dentry, p_dentry, p_mnt,
52362+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52363+
52364+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52365+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52366+ reqmode & GR_READ ? " reading" : "",
52367+ reqmode & GR_WRITE ? " writing" : reqmode &
52368+ GR_APPEND ? " appending" : "");
52369+ return reqmode;
52370+ } else
52371+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52372+ {
52373+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52374+ reqmode & GR_READ ? " reading" : "",
52375+ reqmode & GR_WRITE ? " writing" : reqmode &
52376+ GR_APPEND ? " appending" : "");
52377+ return 0;
52378+ } else if (unlikely((mode & reqmode) != reqmode))
52379+ return 0;
52380+
52381+ return reqmode;
52382+}
52383+
52384+__u32
52385+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52386+ const int fmode)
52387+{
52388+ __u32 mode, reqmode = GR_FIND;
52389+
52390+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52391+ reqmode |= GR_EXEC;
52392+ if (fmode & S_IWOTH)
52393+ reqmode |= GR_WRITE;
52394+ if (fmode & S_IROTH)
52395+ reqmode |= GR_READ;
52396+
52397+ mode =
52398+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52399+ mnt);
52400+
52401+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52402+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52403+ reqmode & GR_READ ? " reading" : "",
52404+ reqmode & GR_WRITE ? " writing" : "",
52405+ reqmode & GR_EXEC ? " executing" : "");
52406+ return reqmode;
52407+ } else
52408+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52409+ {
52410+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52411+ reqmode & GR_READ ? " reading" : "",
52412+ reqmode & GR_WRITE ? " writing" : "",
52413+ reqmode & GR_EXEC ? " executing" : "");
52414+ return 0;
52415+ } else if (unlikely((mode & reqmode) != reqmode))
52416+ return 0;
52417+
52418+ return reqmode;
52419+}
52420+
52421+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52422+{
52423+ __u32 mode;
52424+
52425+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52426+
52427+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52428+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52429+ return mode;
52430+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52431+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52432+ return 0;
52433+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52434+ return 0;
52435+
52436+ return (reqmode);
52437+}
52438+
52439+__u32
52440+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52441+{
52442+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52443+}
52444+
52445+__u32
52446+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52447+{
52448+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52449+}
52450+
52451+__u32
52452+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52453+{
52454+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52455+}
52456+
52457+__u32
52458+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52459+{
52460+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52461+}
52462+
52463+__u32
52464+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52465+ mode_t mode)
52466+{
52467+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52468+ return 1;
52469+
52470+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52471+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52472+ GR_FCHMOD_ACL_MSG);
52473+ } else {
52474+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52475+ }
52476+}
52477+
52478+__u32
52479+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52480+ mode_t mode)
52481+{
52482+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52483+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52484+ GR_CHMOD_ACL_MSG);
52485+ } else {
52486+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52487+ }
52488+}
52489+
52490+__u32
52491+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52492+{
52493+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52494+}
52495+
52496+__u32
52497+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52498+{
52499+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52500+}
52501+
52502+__u32
52503+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52504+{
52505+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52506+}
52507+
52508+__u32
52509+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52510+{
52511+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52512+ GR_UNIXCONNECT_ACL_MSG);
52513+}
52514+
52515+/* hardlinks require at minimum create and link permission,
52516+ any additional privilege required is based on the
52517+ privilege of the file being linked to
52518+*/
52519+__u32
52520+gr_acl_handle_link(const struct dentry * new_dentry,
52521+ const struct dentry * parent_dentry,
52522+ const struct vfsmount * parent_mnt,
52523+ const struct dentry * old_dentry,
52524+ const struct vfsmount * old_mnt, const char *to)
52525+{
52526+ __u32 mode;
52527+ __u32 needmode = GR_CREATE | GR_LINK;
52528+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52529+
52530+ mode =
52531+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52532+ old_mnt);
52533+
52534+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52535+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52536+ return mode;
52537+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52538+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52539+ return 0;
52540+ } else if (unlikely((mode & needmode) != needmode))
52541+ return 0;
52542+
52543+ return 1;
52544+}
52545+
52546+__u32
52547+gr_acl_handle_symlink(const struct dentry * new_dentry,
52548+ const struct dentry * parent_dentry,
52549+ const struct vfsmount * parent_mnt, const char *from)
52550+{
52551+ __u32 needmode = GR_WRITE | GR_CREATE;
52552+ __u32 mode;
52553+
52554+ mode =
52555+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
52556+ GR_CREATE | GR_AUDIT_CREATE |
52557+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52558+
52559+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52560+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52561+ return mode;
52562+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52563+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52564+ return 0;
52565+ } else if (unlikely((mode & needmode) != needmode))
52566+ return 0;
52567+
52568+ return (GR_WRITE | GR_CREATE);
52569+}
52570+
52571+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52572+{
52573+ __u32 mode;
52574+
52575+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52576+
52577+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52578+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52579+ return mode;
52580+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52581+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52582+ return 0;
52583+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52584+ return 0;
52585+
52586+ return (reqmode);
52587+}
52588+
52589+__u32
52590+gr_acl_handle_mknod(const struct dentry * new_dentry,
52591+ const struct dentry * parent_dentry,
52592+ const struct vfsmount * parent_mnt,
52593+ const int mode)
52594+{
52595+ __u32 reqmode = GR_WRITE | GR_CREATE;
52596+ if (unlikely(mode & (S_ISUID | S_ISGID)))
52597+ reqmode |= GR_SETID;
52598+
52599+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52600+ reqmode, GR_MKNOD_ACL_MSG);
52601+}
52602+
52603+__u32
52604+gr_acl_handle_mkdir(const struct dentry *new_dentry,
52605+ const struct dentry *parent_dentry,
52606+ const struct vfsmount *parent_mnt)
52607+{
52608+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52609+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52610+}
52611+
52612+#define RENAME_CHECK_SUCCESS(old, new) \
52613+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52614+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52615+
52616+int
52617+gr_acl_handle_rename(struct dentry *new_dentry,
52618+ struct dentry *parent_dentry,
52619+ const struct vfsmount *parent_mnt,
52620+ struct dentry *old_dentry,
52621+ struct inode *old_parent_inode,
52622+ struct vfsmount *old_mnt, const char *newname)
52623+{
52624+ __u32 comp1, comp2;
52625+ int error = 0;
52626+
52627+ if (unlikely(!gr_acl_is_enabled()))
52628+ return 0;
52629+
52630+ if (!new_dentry->d_inode) {
52631+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52632+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52633+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52634+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52635+ GR_DELETE | GR_AUDIT_DELETE |
52636+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52637+ GR_SUPPRESS, old_mnt);
52638+ } else {
52639+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52640+ GR_CREATE | GR_DELETE |
52641+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52642+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52643+ GR_SUPPRESS, parent_mnt);
52644+ comp2 =
52645+ gr_search_file(old_dentry,
52646+ GR_READ | GR_WRITE | GR_AUDIT_READ |
52647+ GR_DELETE | GR_AUDIT_DELETE |
52648+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52649+ }
52650+
52651+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52652+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52653+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52654+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52655+ && !(comp2 & GR_SUPPRESS)) {
52656+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52657+ error = -EACCES;
52658+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52659+ error = -EACCES;
52660+
52661+ return error;
52662+}
52663+
52664+void
52665+gr_acl_handle_exit(void)
52666+{
52667+ u16 id;
52668+ char *rolename;
52669+ struct file *exec_file;
52670+
52671+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52672+ !(current->role->roletype & GR_ROLE_PERSIST))) {
52673+ id = current->acl_role_id;
52674+ rolename = current->role->rolename;
52675+ gr_set_acls(1);
52676+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52677+ }
52678+
52679+ write_lock(&grsec_exec_file_lock);
52680+ exec_file = current->exec_file;
52681+ current->exec_file = NULL;
52682+ write_unlock(&grsec_exec_file_lock);
52683+
52684+ if (exec_file)
52685+ fput(exec_file);
52686+}
52687+
52688+int
52689+gr_acl_handle_procpidmem(const struct task_struct *task)
52690+{
52691+ if (unlikely(!gr_acl_is_enabled()))
52692+ return 0;
52693+
52694+ if (task != current && task->acl->mode & GR_PROTPROCFD)
52695+ return -EACCES;
52696+
52697+ return 0;
52698+}
52699diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
52700new file mode 100644
52701index 0000000..17050ca
52702--- /dev/null
52703+++ b/grsecurity/gracl_ip.c
52704@@ -0,0 +1,381 @@
52705+#include <linux/kernel.h>
52706+#include <asm/uaccess.h>
52707+#include <asm/errno.h>
52708+#include <net/sock.h>
52709+#include <linux/file.h>
52710+#include <linux/fs.h>
52711+#include <linux/net.h>
52712+#include <linux/in.h>
52713+#include <linux/skbuff.h>
52714+#include <linux/ip.h>
52715+#include <linux/udp.h>
52716+#include <linux/types.h>
52717+#include <linux/sched.h>
52718+#include <linux/netdevice.h>
52719+#include <linux/inetdevice.h>
52720+#include <linux/gracl.h>
52721+#include <linux/grsecurity.h>
52722+#include <linux/grinternal.h>
52723+
52724+#define GR_BIND 0x01
52725+#define GR_CONNECT 0x02
52726+#define GR_INVERT 0x04
52727+#define GR_BINDOVERRIDE 0x08
52728+#define GR_CONNECTOVERRIDE 0x10
52729+#define GR_SOCK_FAMILY 0x20
52730+
52731+static const char * gr_protocols[IPPROTO_MAX] = {
52732+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52733+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52734+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52735+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52736+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52737+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52738+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52739+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52740+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52741+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52742+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52743+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52744+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52745+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52746+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52747+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52748+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52749+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52750+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52751+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52752+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52753+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52754+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52755+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52756+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52757+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52758+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52759+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52760+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52761+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52762+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52763+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52764+ };
52765+
52766+static const char * gr_socktypes[SOCK_MAX] = {
52767+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52768+ "unknown:7", "unknown:8", "unknown:9", "packet"
52769+ };
52770+
52771+static const char * gr_sockfamilies[AF_MAX+1] = {
52772+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52773+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52774+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52775+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52776+ };
52777+
52778+const char *
52779+gr_proto_to_name(unsigned char proto)
52780+{
52781+ return gr_protocols[proto];
52782+}
52783+
52784+const char *
52785+gr_socktype_to_name(unsigned char type)
52786+{
52787+ return gr_socktypes[type];
52788+}
52789+
52790+const char *
52791+gr_sockfamily_to_name(unsigned char family)
52792+{
52793+ return gr_sockfamilies[family];
52794+}
52795+
52796+int
52797+gr_search_socket(const int domain, const int type, const int protocol)
52798+{
52799+ struct acl_subject_label *curr;
52800+ const struct cred *cred = current_cred();
52801+
52802+ if (unlikely(!gr_acl_is_enabled()))
52803+ goto exit;
52804+
52805+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
52806+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52807+ goto exit; // let the kernel handle it
52808+
52809+ curr = current->acl;
52810+
52811+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52812+ /* the family is allowed, if this is PF_INET allow it only if
52813+ the extra sock type/protocol checks pass */
52814+ if (domain == PF_INET)
52815+ goto inet_check;
52816+ goto exit;
52817+ } else {
52818+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52819+ __u32 fakeip = 0;
52820+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52821+ current->role->roletype, cred->uid,
52822+ cred->gid, current->exec_file ?
52823+ gr_to_filename(current->exec_file->f_path.dentry,
52824+ current->exec_file->f_path.mnt) :
52825+ curr->filename, curr->filename,
52826+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52827+ &current->signal->saved_ip);
52828+ goto exit;
52829+ }
52830+ goto exit_fail;
52831+ }
52832+
52833+inet_check:
52834+ /* the rest of this checking is for IPv4 only */
52835+ if (!curr->ips)
52836+ goto exit;
52837+
52838+ if ((curr->ip_type & (1 << type)) &&
52839+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52840+ goto exit;
52841+
52842+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52843+ /* we don't place acls on raw sockets , and sometimes
52844+ dgram/ip sockets are opened for ioctl and not
52845+ bind/connect, so we'll fake a bind learn log */
52846+ if (type == SOCK_RAW || type == SOCK_PACKET) {
52847+ __u32 fakeip = 0;
52848+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52849+ current->role->roletype, cred->uid,
52850+ cred->gid, current->exec_file ?
52851+ gr_to_filename(current->exec_file->f_path.dentry,
52852+ current->exec_file->f_path.mnt) :
52853+ curr->filename, curr->filename,
52854+ &fakeip, 0, type,
52855+ protocol, GR_CONNECT, &current->signal->saved_ip);
52856+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52857+ __u32 fakeip = 0;
52858+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52859+ current->role->roletype, cred->uid,
52860+ cred->gid, current->exec_file ?
52861+ gr_to_filename(current->exec_file->f_path.dentry,
52862+ current->exec_file->f_path.mnt) :
52863+ curr->filename, curr->filename,
52864+ &fakeip, 0, type,
52865+ protocol, GR_BIND, &current->signal->saved_ip);
52866+ }
52867+ /* we'll log when they use connect or bind */
52868+ goto exit;
52869+ }
52870+
52871+exit_fail:
52872+ if (domain == PF_INET)
52873+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52874+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
52875+ else
52876+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52877+ gr_socktype_to_name(type), protocol);
52878+
52879+ return 0;
52880+exit:
52881+ return 1;
52882+}
52883+
52884+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52885+{
52886+ if ((ip->mode & mode) &&
52887+ (ip_port >= ip->low) &&
52888+ (ip_port <= ip->high) &&
52889+ ((ntohl(ip_addr) & our_netmask) ==
52890+ (ntohl(our_addr) & our_netmask))
52891+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52892+ && (ip->type & (1 << type))) {
52893+ if (ip->mode & GR_INVERT)
52894+ return 2; // specifically denied
52895+ else
52896+ return 1; // allowed
52897+ }
52898+
52899+ return 0; // not specifically allowed, may continue parsing
52900+}
52901+
52902+static int
52903+gr_search_connectbind(const int full_mode, struct sock *sk,
52904+ struct sockaddr_in *addr, const int type)
52905+{
52906+ char iface[IFNAMSIZ] = {0};
52907+ struct acl_subject_label *curr;
52908+ struct acl_ip_label *ip;
52909+ struct inet_sock *isk;
52910+ struct net_device *dev;
52911+ struct in_device *idev;
52912+ unsigned long i;
52913+ int ret;
52914+ int mode = full_mode & (GR_BIND | GR_CONNECT);
52915+ __u32 ip_addr = 0;
52916+ __u32 our_addr;
52917+ __u32 our_netmask;
52918+ char *p;
52919+ __u16 ip_port = 0;
52920+ const struct cred *cred = current_cred();
52921+
52922+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52923+ return 0;
52924+
52925+ curr = current->acl;
52926+ isk = inet_sk(sk);
52927+
52928+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52929+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52930+ addr->sin_addr.s_addr = curr->inaddr_any_override;
52931+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52932+ struct sockaddr_in saddr;
52933+ int err;
52934+
52935+ saddr.sin_family = AF_INET;
52936+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
52937+ saddr.sin_port = isk->inet_sport;
52938+
52939+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52940+ if (err)
52941+ return err;
52942+
52943+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52944+ if (err)
52945+ return err;
52946+ }
52947+
52948+ if (!curr->ips)
52949+ return 0;
52950+
52951+ ip_addr = addr->sin_addr.s_addr;
52952+ ip_port = ntohs(addr->sin_port);
52953+
52954+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52955+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52956+ current->role->roletype, cred->uid,
52957+ cred->gid, current->exec_file ?
52958+ gr_to_filename(current->exec_file->f_path.dentry,
52959+ current->exec_file->f_path.mnt) :
52960+ curr->filename, curr->filename,
52961+ &ip_addr, ip_port, type,
52962+ sk->sk_protocol, mode, &current->signal->saved_ip);
52963+ return 0;
52964+ }
52965+
52966+ for (i = 0; i < curr->ip_num; i++) {
52967+ ip = *(curr->ips + i);
52968+ if (ip->iface != NULL) {
52969+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
52970+ p = strchr(iface, ':');
52971+ if (p != NULL)
52972+ *p = '\0';
52973+ dev = dev_get_by_name(sock_net(sk), iface);
52974+ if (dev == NULL)
52975+ continue;
52976+ idev = in_dev_get(dev);
52977+ if (idev == NULL) {
52978+ dev_put(dev);
52979+ continue;
52980+ }
52981+ rcu_read_lock();
52982+ for_ifa(idev) {
52983+ if (!strcmp(ip->iface, ifa->ifa_label)) {
52984+ our_addr = ifa->ifa_address;
52985+ our_netmask = 0xffffffff;
52986+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52987+ if (ret == 1) {
52988+ rcu_read_unlock();
52989+ in_dev_put(idev);
52990+ dev_put(dev);
52991+ return 0;
52992+ } else if (ret == 2) {
52993+ rcu_read_unlock();
52994+ in_dev_put(idev);
52995+ dev_put(dev);
52996+ goto denied;
52997+ }
52998+ }
52999+ } endfor_ifa(idev);
53000+ rcu_read_unlock();
53001+ in_dev_put(idev);
53002+ dev_put(dev);
53003+ } else {
53004+ our_addr = ip->addr;
53005+ our_netmask = ip->netmask;
53006+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53007+ if (ret == 1)
53008+ return 0;
53009+ else if (ret == 2)
53010+ goto denied;
53011+ }
53012+ }
53013+
53014+denied:
53015+ if (mode == GR_BIND)
53016+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53017+ else if (mode == GR_CONNECT)
53018+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53019+
53020+ return -EACCES;
53021+}
53022+
53023+int
53024+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53025+{
53026+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53027+}
53028+
53029+int
53030+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53031+{
53032+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53033+}
53034+
53035+int gr_search_listen(struct socket *sock)
53036+{
53037+ struct sock *sk = sock->sk;
53038+ struct sockaddr_in addr;
53039+
53040+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53041+ addr.sin_port = inet_sk(sk)->inet_sport;
53042+
53043+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53044+}
53045+
53046+int gr_search_accept(struct socket *sock)
53047+{
53048+ struct sock *sk = sock->sk;
53049+ struct sockaddr_in addr;
53050+
53051+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53052+ addr.sin_port = inet_sk(sk)->inet_sport;
53053+
53054+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53055+}
53056+
53057+int
53058+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53059+{
53060+ if (addr)
53061+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53062+ else {
53063+ struct sockaddr_in sin;
53064+ const struct inet_sock *inet = inet_sk(sk);
53065+
53066+ sin.sin_addr.s_addr = inet->inet_daddr;
53067+ sin.sin_port = inet->inet_dport;
53068+
53069+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53070+ }
53071+}
53072+
53073+int
53074+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53075+{
53076+ struct sockaddr_in sin;
53077+
53078+ if (unlikely(skb->len < sizeof (struct udphdr)))
53079+ return 0; // skip this packet
53080+
53081+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53082+ sin.sin_port = udp_hdr(skb)->source;
53083+
53084+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53085+}
53086diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53087new file mode 100644
53088index 0000000..25f54ef
53089--- /dev/null
53090+++ b/grsecurity/gracl_learn.c
53091@@ -0,0 +1,207 @@
53092+#include <linux/kernel.h>
53093+#include <linux/mm.h>
53094+#include <linux/sched.h>
53095+#include <linux/poll.h>
53096+#include <linux/string.h>
53097+#include <linux/file.h>
53098+#include <linux/types.h>
53099+#include <linux/vmalloc.h>
53100+#include <linux/grinternal.h>
53101+
53102+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53103+ size_t count, loff_t *ppos);
53104+extern int gr_acl_is_enabled(void);
53105+
53106+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53107+static int gr_learn_attached;
53108+
53109+/* use a 512k buffer */
53110+#define LEARN_BUFFER_SIZE (512 * 1024)
53111+
53112+static DEFINE_SPINLOCK(gr_learn_lock);
53113+static DEFINE_MUTEX(gr_learn_user_mutex);
53114+
53115+/* we need to maintain two buffers, so that the kernel context of grlearn
53116+ uses a semaphore around the userspace copying, and the other kernel contexts
53117+ use a spinlock when copying into the buffer, since they cannot sleep
53118+*/
53119+static char *learn_buffer;
53120+static char *learn_buffer_user;
53121+static int learn_buffer_len;
53122+static int learn_buffer_user_len;
53123+
53124+static ssize_t
53125+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53126+{
53127+ DECLARE_WAITQUEUE(wait, current);
53128+ ssize_t retval = 0;
53129+
53130+ add_wait_queue(&learn_wait, &wait);
53131+ set_current_state(TASK_INTERRUPTIBLE);
53132+ do {
53133+ mutex_lock(&gr_learn_user_mutex);
53134+ spin_lock(&gr_learn_lock);
53135+ if (learn_buffer_len)
53136+ break;
53137+ spin_unlock(&gr_learn_lock);
53138+ mutex_unlock(&gr_learn_user_mutex);
53139+ if (file->f_flags & O_NONBLOCK) {
53140+ retval = -EAGAIN;
53141+ goto out;
53142+ }
53143+ if (signal_pending(current)) {
53144+ retval = -ERESTARTSYS;
53145+ goto out;
53146+ }
53147+
53148+ schedule();
53149+ } while (1);
53150+
53151+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53152+ learn_buffer_user_len = learn_buffer_len;
53153+ retval = learn_buffer_len;
53154+ learn_buffer_len = 0;
53155+
53156+ spin_unlock(&gr_learn_lock);
53157+
53158+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53159+ retval = -EFAULT;
53160+
53161+ mutex_unlock(&gr_learn_user_mutex);
53162+out:
53163+ set_current_state(TASK_RUNNING);
53164+ remove_wait_queue(&learn_wait, &wait);
53165+ return retval;
53166+}
53167+
53168+static unsigned int
53169+poll_learn(struct file * file, poll_table * wait)
53170+{
53171+ poll_wait(file, &learn_wait, wait);
53172+
53173+ if (learn_buffer_len)
53174+ return (POLLIN | POLLRDNORM);
53175+
53176+ return 0;
53177+}
53178+
53179+void
53180+gr_clear_learn_entries(void)
53181+{
53182+ char *tmp;
53183+
53184+ mutex_lock(&gr_learn_user_mutex);
53185+ spin_lock(&gr_learn_lock);
53186+ tmp = learn_buffer;
53187+ learn_buffer = NULL;
53188+ spin_unlock(&gr_learn_lock);
53189+ if (tmp)
53190+ vfree(tmp);
53191+ if (learn_buffer_user != NULL) {
53192+ vfree(learn_buffer_user);
53193+ learn_buffer_user = NULL;
53194+ }
53195+ learn_buffer_len = 0;
53196+ mutex_unlock(&gr_learn_user_mutex);
53197+
53198+ return;
53199+}
53200+
53201+void
53202+gr_add_learn_entry(const char *fmt, ...)
53203+{
53204+ va_list args;
53205+ unsigned int len;
53206+
53207+ if (!gr_learn_attached)
53208+ return;
53209+
53210+ spin_lock(&gr_learn_lock);
53211+
53212+ /* leave a gap at the end so we know when it's "full" but don't have to
53213+ compute the exact length of the string we're trying to append
53214+ */
53215+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53216+ spin_unlock(&gr_learn_lock);
53217+ wake_up_interruptible(&learn_wait);
53218+ return;
53219+ }
53220+ if (learn_buffer == NULL) {
53221+ spin_unlock(&gr_learn_lock);
53222+ return;
53223+ }
53224+
53225+ va_start(args, fmt);
53226+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53227+ va_end(args);
53228+
53229+ learn_buffer_len += len + 1;
53230+
53231+ spin_unlock(&gr_learn_lock);
53232+ wake_up_interruptible(&learn_wait);
53233+
53234+ return;
53235+}
53236+
53237+static int
53238+open_learn(struct inode *inode, struct file *file)
53239+{
53240+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53241+ return -EBUSY;
53242+ if (file->f_mode & FMODE_READ) {
53243+ int retval = 0;
53244+ mutex_lock(&gr_learn_user_mutex);
53245+ if (learn_buffer == NULL)
53246+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53247+ if (learn_buffer_user == NULL)
53248+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53249+ if (learn_buffer == NULL) {
53250+ retval = -ENOMEM;
53251+ goto out_error;
53252+ }
53253+ if (learn_buffer_user == NULL) {
53254+ retval = -ENOMEM;
53255+ goto out_error;
53256+ }
53257+ learn_buffer_len = 0;
53258+ learn_buffer_user_len = 0;
53259+ gr_learn_attached = 1;
53260+out_error:
53261+ mutex_unlock(&gr_learn_user_mutex);
53262+ return retval;
53263+ }
53264+ return 0;
53265+}
53266+
53267+static int
53268+close_learn(struct inode *inode, struct file *file)
53269+{
53270+ if (file->f_mode & FMODE_READ) {
53271+ char *tmp = NULL;
53272+ mutex_lock(&gr_learn_user_mutex);
53273+ spin_lock(&gr_learn_lock);
53274+ tmp = learn_buffer;
53275+ learn_buffer = NULL;
53276+ spin_unlock(&gr_learn_lock);
53277+ if (tmp)
53278+ vfree(tmp);
53279+ if (learn_buffer_user != NULL) {
53280+ vfree(learn_buffer_user);
53281+ learn_buffer_user = NULL;
53282+ }
53283+ learn_buffer_len = 0;
53284+ learn_buffer_user_len = 0;
53285+ gr_learn_attached = 0;
53286+ mutex_unlock(&gr_learn_user_mutex);
53287+ }
53288+
53289+ return 0;
53290+}
53291+
53292+const struct file_operations grsec_fops = {
53293+ .read = read_learn,
53294+ .write = write_grsec_handler,
53295+ .open = open_learn,
53296+ .release = close_learn,
53297+ .poll = poll_learn,
53298+};
53299diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53300new file mode 100644
53301index 0000000..39645c9
53302--- /dev/null
53303+++ b/grsecurity/gracl_res.c
53304@@ -0,0 +1,68 @@
53305+#include <linux/kernel.h>
53306+#include <linux/sched.h>
53307+#include <linux/gracl.h>
53308+#include <linux/grinternal.h>
53309+
53310+static const char *restab_log[] = {
53311+ [RLIMIT_CPU] = "RLIMIT_CPU",
53312+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53313+ [RLIMIT_DATA] = "RLIMIT_DATA",
53314+ [RLIMIT_STACK] = "RLIMIT_STACK",
53315+ [RLIMIT_CORE] = "RLIMIT_CORE",
53316+ [RLIMIT_RSS] = "RLIMIT_RSS",
53317+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53318+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53319+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53320+ [RLIMIT_AS] = "RLIMIT_AS",
53321+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53322+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53323+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53324+ [RLIMIT_NICE] = "RLIMIT_NICE",
53325+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53326+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53327+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53328+};
53329+
53330+void
53331+gr_log_resource(const struct task_struct *task,
53332+ const int res, const unsigned long wanted, const int gt)
53333+{
53334+ const struct cred *cred;
53335+ unsigned long rlim;
53336+
53337+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53338+ return;
53339+
53340+ // not yet supported resource
53341+ if (unlikely(!restab_log[res]))
53342+ return;
53343+
53344+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53345+ rlim = task_rlimit_max(task, res);
53346+ else
53347+ rlim = task_rlimit(task, res);
53348+
53349+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53350+ return;
53351+
53352+ rcu_read_lock();
53353+ cred = __task_cred(task);
53354+
53355+ if (res == RLIMIT_NPROC &&
53356+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53357+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53358+ goto out_rcu_unlock;
53359+ else if (res == RLIMIT_MEMLOCK &&
53360+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53361+ goto out_rcu_unlock;
53362+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53363+ goto out_rcu_unlock;
53364+ rcu_read_unlock();
53365+
53366+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53367+
53368+ return;
53369+out_rcu_unlock:
53370+ rcu_read_unlock();
53371+ return;
53372+}
53373diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53374new file mode 100644
53375index 0000000..5556be3
53376--- /dev/null
53377+++ b/grsecurity/gracl_segv.c
53378@@ -0,0 +1,299 @@
53379+#include <linux/kernel.h>
53380+#include <linux/mm.h>
53381+#include <asm/uaccess.h>
53382+#include <asm/errno.h>
53383+#include <asm/mman.h>
53384+#include <net/sock.h>
53385+#include <linux/file.h>
53386+#include <linux/fs.h>
53387+#include <linux/net.h>
53388+#include <linux/in.h>
53389+#include <linux/slab.h>
53390+#include <linux/types.h>
53391+#include <linux/sched.h>
53392+#include <linux/timer.h>
53393+#include <linux/gracl.h>
53394+#include <linux/grsecurity.h>
53395+#include <linux/grinternal.h>
53396+
53397+static struct crash_uid *uid_set;
53398+static unsigned short uid_used;
53399+static DEFINE_SPINLOCK(gr_uid_lock);
53400+extern rwlock_t gr_inode_lock;
53401+extern struct acl_subject_label *
53402+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53403+ struct acl_role_label *role);
53404+
53405+#ifdef CONFIG_BTRFS_FS
53406+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53407+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53408+#endif
53409+
53410+static inline dev_t __get_dev(const struct dentry *dentry)
53411+{
53412+#ifdef CONFIG_BTRFS_FS
53413+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53414+ return get_btrfs_dev_from_inode(dentry->d_inode);
53415+ else
53416+#endif
53417+ return dentry->d_inode->i_sb->s_dev;
53418+}
53419+
53420+int
53421+gr_init_uidset(void)
53422+{
53423+ uid_set =
53424+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53425+ uid_used = 0;
53426+
53427+ return uid_set ? 1 : 0;
53428+}
53429+
53430+void
53431+gr_free_uidset(void)
53432+{
53433+ if (uid_set)
53434+ kfree(uid_set);
53435+
53436+ return;
53437+}
53438+
53439+int
53440+gr_find_uid(const uid_t uid)
53441+{
53442+ struct crash_uid *tmp = uid_set;
53443+ uid_t buid;
53444+ int low = 0, high = uid_used - 1, mid;
53445+
53446+ while (high >= low) {
53447+ mid = (low + high) >> 1;
53448+ buid = tmp[mid].uid;
53449+ if (buid == uid)
53450+ return mid;
53451+ if (buid > uid)
53452+ high = mid - 1;
53453+ if (buid < uid)
53454+ low = mid + 1;
53455+ }
53456+
53457+ return -1;
53458+}
53459+
53460+static __inline__ void
53461+gr_insertsort(void)
53462+{
53463+ unsigned short i, j;
53464+ struct crash_uid index;
53465+
53466+ for (i = 1; i < uid_used; i++) {
53467+ index = uid_set[i];
53468+ j = i;
53469+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53470+ uid_set[j] = uid_set[j - 1];
53471+ j--;
53472+ }
53473+ uid_set[j] = index;
53474+ }
53475+
53476+ return;
53477+}
53478+
53479+static __inline__ void
53480+gr_insert_uid(const uid_t uid, const unsigned long expires)
53481+{
53482+ int loc;
53483+
53484+ if (uid_used == GR_UIDTABLE_MAX)
53485+ return;
53486+
53487+ loc = gr_find_uid(uid);
53488+
53489+ if (loc >= 0) {
53490+ uid_set[loc].expires = expires;
53491+ return;
53492+ }
53493+
53494+ uid_set[uid_used].uid = uid;
53495+ uid_set[uid_used].expires = expires;
53496+ uid_used++;
53497+
53498+ gr_insertsort();
53499+
53500+ return;
53501+}
53502+
53503+void
53504+gr_remove_uid(const unsigned short loc)
53505+{
53506+ unsigned short i;
53507+
53508+ for (i = loc + 1; i < uid_used; i++)
53509+ uid_set[i - 1] = uid_set[i];
53510+
53511+ uid_used--;
53512+
53513+ return;
53514+}
53515+
53516+int
53517+gr_check_crash_uid(const uid_t uid)
53518+{
53519+ int loc;
53520+ int ret = 0;
53521+
53522+ if (unlikely(!gr_acl_is_enabled()))
53523+ return 0;
53524+
53525+ spin_lock(&gr_uid_lock);
53526+ loc = gr_find_uid(uid);
53527+
53528+ if (loc < 0)
53529+ goto out_unlock;
53530+
53531+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
53532+ gr_remove_uid(loc);
53533+ else
53534+ ret = 1;
53535+
53536+out_unlock:
53537+ spin_unlock(&gr_uid_lock);
53538+ return ret;
53539+}
53540+
53541+static __inline__ int
53542+proc_is_setxid(const struct cred *cred)
53543+{
53544+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
53545+ cred->uid != cred->fsuid)
53546+ return 1;
53547+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53548+ cred->gid != cred->fsgid)
53549+ return 1;
53550+
53551+ return 0;
53552+}
53553+
53554+extern int gr_fake_force_sig(int sig, struct task_struct *t);
53555+
53556+void
53557+gr_handle_crash(struct task_struct *task, const int sig)
53558+{
53559+ struct acl_subject_label *curr;
53560+ struct task_struct *tsk, *tsk2;
53561+ const struct cred *cred;
53562+ const struct cred *cred2;
53563+
53564+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53565+ return;
53566+
53567+ if (unlikely(!gr_acl_is_enabled()))
53568+ return;
53569+
53570+ curr = task->acl;
53571+
53572+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
53573+ return;
53574+
53575+ if (time_before_eq(curr->expires, get_seconds())) {
53576+ curr->expires = 0;
53577+ curr->crashes = 0;
53578+ }
53579+
53580+ curr->crashes++;
53581+
53582+ if (!curr->expires)
53583+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53584+
53585+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53586+ time_after(curr->expires, get_seconds())) {
53587+ rcu_read_lock();
53588+ cred = __task_cred(task);
53589+ if (cred->uid && proc_is_setxid(cred)) {
53590+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53591+ spin_lock(&gr_uid_lock);
53592+ gr_insert_uid(cred->uid, curr->expires);
53593+ spin_unlock(&gr_uid_lock);
53594+ curr->expires = 0;
53595+ curr->crashes = 0;
53596+ read_lock(&tasklist_lock);
53597+ do_each_thread(tsk2, tsk) {
53598+ cred2 = __task_cred(tsk);
53599+ if (tsk != task && cred2->uid == cred->uid)
53600+ gr_fake_force_sig(SIGKILL, tsk);
53601+ } while_each_thread(tsk2, tsk);
53602+ read_unlock(&tasklist_lock);
53603+ } else {
53604+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53605+ read_lock(&tasklist_lock);
53606+ read_lock(&grsec_exec_file_lock);
53607+ do_each_thread(tsk2, tsk) {
53608+ if (likely(tsk != task)) {
53609+ // if this thread has the same subject as the one that triggered
53610+ // RES_CRASH and it's the same binary, kill it
53611+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
53612+ gr_fake_force_sig(SIGKILL, tsk);
53613+ }
53614+ } while_each_thread(tsk2, tsk);
53615+ read_unlock(&grsec_exec_file_lock);
53616+ read_unlock(&tasklist_lock);
53617+ }
53618+ rcu_read_unlock();
53619+ }
53620+
53621+ return;
53622+}
53623+
53624+int
53625+gr_check_crash_exec(const struct file *filp)
53626+{
53627+ struct acl_subject_label *curr;
53628+
53629+ if (unlikely(!gr_acl_is_enabled()))
53630+ return 0;
53631+
53632+ read_lock(&gr_inode_lock);
53633+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53634+ __get_dev(filp->f_path.dentry),
53635+ current->role);
53636+ read_unlock(&gr_inode_lock);
53637+
53638+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53639+ (!curr->crashes && !curr->expires))
53640+ return 0;
53641+
53642+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53643+ time_after(curr->expires, get_seconds()))
53644+ return 1;
53645+ else if (time_before_eq(curr->expires, get_seconds())) {
53646+ curr->crashes = 0;
53647+ curr->expires = 0;
53648+ }
53649+
53650+ return 0;
53651+}
53652+
53653+void
53654+gr_handle_alertkill(struct task_struct *task)
53655+{
53656+ struct acl_subject_label *curracl;
53657+ __u32 curr_ip;
53658+ struct task_struct *p, *p2;
53659+
53660+ if (unlikely(!gr_acl_is_enabled()))
53661+ return;
53662+
53663+ curracl = task->acl;
53664+ curr_ip = task->signal->curr_ip;
53665+
53666+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53667+ read_lock(&tasklist_lock);
53668+ do_each_thread(p2, p) {
53669+ if (p->signal->curr_ip == curr_ip)
53670+ gr_fake_force_sig(SIGKILL, p);
53671+ } while_each_thread(p2, p);
53672+ read_unlock(&tasklist_lock);
53673+ } else if (curracl->mode & GR_KILLPROC)
53674+ gr_fake_force_sig(SIGKILL, task);
53675+
53676+ return;
53677+}
53678diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
53679new file mode 100644
53680index 0000000..9d83a69
53681--- /dev/null
53682+++ b/grsecurity/gracl_shm.c
53683@@ -0,0 +1,40 @@
53684+#include <linux/kernel.h>
53685+#include <linux/mm.h>
53686+#include <linux/sched.h>
53687+#include <linux/file.h>
53688+#include <linux/ipc.h>
53689+#include <linux/gracl.h>
53690+#include <linux/grsecurity.h>
53691+#include <linux/grinternal.h>
53692+
53693+int
53694+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53695+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53696+{
53697+ struct task_struct *task;
53698+
53699+ if (!gr_acl_is_enabled())
53700+ return 1;
53701+
53702+ rcu_read_lock();
53703+ read_lock(&tasklist_lock);
53704+
53705+ task = find_task_by_vpid(shm_cprid);
53706+
53707+ if (unlikely(!task))
53708+ task = find_task_by_vpid(shm_lapid);
53709+
53710+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53711+ (task->pid == shm_lapid)) &&
53712+ (task->acl->mode & GR_PROTSHM) &&
53713+ (task->acl != current->acl))) {
53714+ read_unlock(&tasklist_lock);
53715+ rcu_read_unlock();
53716+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53717+ return 0;
53718+ }
53719+ read_unlock(&tasklist_lock);
53720+ rcu_read_unlock();
53721+
53722+ return 1;
53723+}
53724diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
53725new file mode 100644
53726index 0000000..bc0be01
53727--- /dev/null
53728+++ b/grsecurity/grsec_chdir.c
53729@@ -0,0 +1,19 @@
53730+#include <linux/kernel.h>
53731+#include <linux/sched.h>
53732+#include <linux/fs.h>
53733+#include <linux/file.h>
53734+#include <linux/grsecurity.h>
53735+#include <linux/grinternal.h>
53736+
53737+void
53738+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53739+{
53740+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53741+ if ((grsec_enable_chdir && grsec_enable_group &&
53742+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53743+ !grsec_enable_group)) {
53744+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53745+ }
53746+#endif
53747+ return;
53748+}
53749diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
53750new file mode 100644
53751index 0000000..a2dc675
53752--- /dev/null
53753+++ b/grsecurity/grsec_chroot.c
53754@@ -0,0 +1,351 @@
53755+#include <linux/kernel.h>
53756+#include <linux/module.h>
53757+#include <linux/sched.h>
53758+#include <linux/file.h>
53759+#include <linux/fs.h>
53760+#include <linux/mount.h>
53761+#include <linux/types.h>
53762+#include <linux/pid_namespace.h>
53763+#include <linux/grsecurity.h>
53764+#include <linux/grinternal.h>
53765+
53766+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53767+{
53768+#ifdef CONFIG_GRKERNSEC
53769+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53770+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53771+ task->gr_is_chrooted = 1;
53772+ else
53773+ task->gr_is_chrooted = 0;
53774+
53775+ task->gr_chroot_dentry = path->dentry;
53776+#endif
53777+ return;
53778+}
53779+
53780+void gr_clear_chroot_entries(struct task_struct *task)
53781+{
53782+#ifdef CONFIG_GRKERNSEC
53783+ task->gr_is_chrooted = 0;
53784+ task->gr_chroot_dentry = NULL;
53785+#endif
53786+ return;
53787+}
53788+
53789+int
53790+gr_handle_chroot_unix(const pid_t pid)
53791+{
53792+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53793+ struct task_struct *p;
53794+
53795+ if (unlikely(!grsec_enable_chroot_unix))
53796+ return 1;
53797+
53798+ if (likely(!proc_is_chrooted(current)))
53799+ return 1;
53800+
53801+ rcu_read_lock();
53802+ read_lock(&tasklist_lock);
53803+ p = find_task_by_vpid_unrestricted(pid);
53804+ if (unlikely(p && !have_same_root(current, p))) {
53805+ read_unlock(&tasklist_lock);
53806+ rcu_read_unlock();
53807+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53808+ return 0;
53809+ }
53810+ read_unlock(&tasklist_lock);
53811+ rcu_read_unlock();
53812+#endif
53813+ return 1;
53814+}
53815+
53816+int
53817+gr_handle_chroot_nice(void)
53818+{
53819+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53820+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53821+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53822+ return -EPERM;
53823+ }
53824+#endif
53825+ return 0;
53826+}
53827+
53828+int
53829+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53830+{
53831+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53832+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53833+ && proc_is_chrooted(current)) {
53834+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53835+ return -EACCES;
53836+ }
53837+#endif
53838+ return 0;
53839+}
53840+
53841+int
53842+gr_handle_chroot_rawio(const struct inode *inode)
53843+{
53844+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53845+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53846+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53847+ return 1;
53848+#endif
53849+ return 0;
53850+}
53851+
53852+int
53853+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53854+{
53855+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53856+ struct task_struct *p;
53857+ int ret = 0;
53858+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53859+ return ret;
53860+
53861+ read_lock(&tasklist_lock);
53862+ do_each_pid_task(pid, type, p) {
53863+ if (!have_same_root(current, p)) {
53864+ ret = 1;
53865+ goto out;
53866+ }
53867+ } while_each_pid_task(pid, type, p);
53868+out:
53869+ read_unlock(&tasklist_lock);
53870+ return ret;
53871+#endif
53872+ return 0;
53873+}
53874+
53875+int
53876+gr_pid_is_chrooted(struct task_struct *p)
53877+{
53878+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53879+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53880+ return 0;
53881+
53882+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53883+ !have_same_root(current, p)) {
53884+ return 1;
53885+ }
53886+#endif
53887+ return 0;
53888+}
53889+
53890+EXPORT_SYMBOL(gr_pid_is_chrooted);
53891+
53892+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53893+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53894+{
53895+ struct path path, currentroot;
53896+ int ret = 0;
53897+
53898+ path.dentry = (struct dentry *)u_dentry;
53899+ path.mnt = (struct vfsmount *)u_mnt;
53900+ get_fs_root(current->fs, &currentroot);
53901+ if (path_is_under(&path, &currentroot))
53902+ ret = 1;
53903+ path_put(&currentroot);
53904+
53905+ return ret;
53906+}
53907+#endif
53908+
53909+int
53910+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53911+{
53912+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53913+ if (!grsec_enable_chroot_fchdir)
53914+ return 1;
53915+
53916+ if (!proc_is_chrooted(current))
53917+ return 1;
53918+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53919+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53920+ return 0;
53921+ }
53922+#endif
53923+ return 1;
53924+}
53925+
53926+int
53927+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53928+ const time_t shm_createtime)
53929+{
53930+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53931+ struct task_struct *p;
53932+ time_t starttime;
53933+
53934+ if (unlikely(!grsec_enable_chroot_shmat))
53935+ return 1;
53936+
53937+ if (likely(!proc_is_chrooted(current)))
53938+ return 1;
53939+
53940+ rcu_read_lock();
53941+ read_lock(&tasklist_lock);
53942+
53943+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53944+ starttime = p->start_time.tv_sec;
53945+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53946+ if (have_same_root(current, p)) {
53947+ goto allow;
53948+ } else {
53949+ read_unlock(&tasklist_lock);
53950+ rcu_read_unlock();
53951+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53952+ return 0;
53953+ }
53954+ }
53955+ /* creator exited, pid reuse, fall through to next check */
53956+ }
53957+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53958+ if (unlikely(!have_same_root(current, p))) {
53959+ read_unlock(&tasklist_lock);
53960+ rcu_read_unlock();
53961+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53962+ return 0;
53963+ }
53964+ }
53965+
53966+allow:
53967+ read_unlock(&tasklist_lock);
53968+ rcu_read_unlock();
53969+#endif
53970+ return 1;
53971+}
53972+
53973+void
53974+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53975+{
53976+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53977+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53978+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53979+#endif
53980+ return;
53981+}
53982+
53983+int
53984+gr_handle_chroot_mknod(const struct dentry *dentry,
53985+ const struct vfsmount *mnt, const int mode)
53986+{
53987+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53988+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53989+ proc_is_chrooted(current)) {
53990+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53991+ return -EPERM;
53992+ }
53993+#endif
53994+ return 0;
53995+}
53996+
53997+int
53998+gr_handle_chroot_mount(const struct dentry *dentry,
53999+ const struct vfsmount *mnt, const char *dev_name)
54000+{
54001+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54002+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54003+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54004+ return -EPERM;
54005+ }
54006+#endif
54007+ return 0;
54008+}
54009+
54010+int
54011+gr_handle_chroot_pivot(void)
54012+{
54013+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54014+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54015+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54016+ return -EPERM;
54017+ }
54018+#endif
54019+ return 0;
54020+}
54021+
54022+int
54023+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54024+{
54025+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54026+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54027+ !gr_is_outside_chroot(dentry, mnt)) {
54028+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54029+ return -EPERM;
54030+ }
54031+#endif
54032+ return 0;
54033+}
54034+
54035+extern const char *captab_log[];
54036+extern int captab_log_entries;
54037+
54038+int
54039+gr_chroot_is_capable(const int cap)
54040+{
54041+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54042+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54043+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54044+ if (cap_raised(chroot_caps, cap)) {
54045+ const struct cred *creds = current_cred();
54046+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54047+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54048+ }
54049+ return 0;
54050+ }
54051+ }
54052+#endif
54053+ return 1;
54054+}
54055+
54056+int
54057+gr_chroot_is_capable_nolog(const int cap)
54058+{
54059+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54060+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54061+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54062+ if (cap_raised(chroot_caps, cap)) {
54063+ return 0;
54064+ }
54065+ }
54066+#endif
54067+ return 1;
54068+}
54069+
54070+int
54071+gr_handle_chroot_sysctl(const int op)
54072+{
54073+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54074+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54075+ proc_is_chrooted(current))
54076+ return -EACCES;
54077+#endif
54078+ return 0;
54079+}
54080+
54081+void
54082+gr_handle_chroot_chdir(struct path *path)
54083+{
54084+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54085+ if (grsec_enable_chroot_chdir)
54086+ set_fs_pwd(current->fs, path);
54087+#endif
54088+ return;
54089+}
54090+
54091+int
54092+gr_handle_chroot_chmod(const struct dentry *dentry,
54093+ const struct vfsmount *mnt, const int mode)
54094+{
54095+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54096+ /* allow chmod +s on directories, but not files */
54097+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54098+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54099+ proc_is_chrooted(current)) {
54100+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54101+ return -EPERM;
54102+ }
54103+#endif
54104+ return 0;
54105+}
54106diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54107new file mode 100644
54108index 0000000..d81a586
54109--- /dev/null
54110+++ b/grsecurity/grsec_disabled.c
54111@@ -0,0 +1,439 @@
54112+#include <linux/kernel.h>
54113+#include <linux/module.h>
54114+#include <linux/sched.h>
54115+#include <linux/file.h>
54116+#include <linux/fs.h>
54117+#include <linux/kdev_t.h>
54118+#include <linux/net.h>
54119+#include <linux/in.h>
54120+#include <linux/ip.h>
54121+#include <linux/skbuff.h>
54122+#include <linux/sysctl.h>
54123+
54124+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54125+void
54126+pax_set_initial_flags(struct linux_binprm *bprm)
54127+{
54128+ return;
54129+}
54130+#endif
54131+
54132+#ifdef CONFIG_SYSCTL
54133+__u32
54134+gr_handle_sysctl(const struct ctl_table * table, const int op)
54135+{
54136+ return 0;
54137+}
54138+#endif
54139+
54140+#ifdef CONFIG_TASKSTATS
54141+int gr_is_taskstats_denied(int pid)
54142+{
54143+ return 0;
54144+}
54145+#endif
54146+
54147+int
54148+gr_acl_is_enabled(void)
54149+{
54150+ return 0;
54151+}
54152+
54153+void
54154+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54155+{
54156+ return;
54157+}
54158+
54159+int
54160+gr_handle_rawio(const struct inode *inode)
54161+{
54162+ return 0;
54163+}
54164+
54165+void
54166+gr_acl_handle_psacct(struct task_struct *task, const long code)
54167+{
54168+ return;
54169+}
54170+
54171+int
54172+gr_handle_ptrace(struct task_struct *task, const long request)
54173+{
54174+ return 0;
54175+}
54176+
54177+int
54178+gr_handle_proc_ptrace(struct task_struct *task)
54179+{
54180+ return 0;
54181+}
54182+
54183+void
54184+gr_learn_resource(const struct task_struct *task,
54185+ const int res, const unsigned long wanted, const int gt)
54186+{
54187+ return;
54188+}
54189+
54190+int
54191+gr_set_acls(const int type)
54192+{
54193+ return 0;
54194+}
54195+
54196+int
54197+gr_check_hidden_task(const struct task_struct *tsk)
54198+{
54199+ return 0;
54200+}
54201+
54202+int
54203+gr_check_protected_task(const struct task_struct *task)
54204+{
54205+ return 0;
54206+}
54207+
54208+int
54209+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54210+{
54211+ return 0;
54212+}
54213+
54214+void
54215+gr_copy_label(struct task_struct *tsk)
54216+{
54217+ return;
54218+}
54219+
54220+void
54221+gr_set_pax_flags(struct task_struct *task)
54222+{
54223+ return;
54224+}
54225+
54226+int
54227+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54228+ const int unsafe_share)
54229+{
54230+ return 0;
54231+}
54232+
54233+void
54234+gr_handle_delete(const ino_t ino, const dev_t dev)
54235+{
54236+ return;
54237+}
54238+
54239+void
54240+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54241+{
54242+ return;
54243+}
54244+
54245+void
54246+gr_handle_crash(struct task_struct *task, const int sig)
54247+{
54248+ return;
54249+}
54250+
54251+int
54252+gr_check_crash_exec(const struct file *filp)
54253+{
54254+ return 0;
54255+}
54256+
54257+int
54258+gr_check_crash_uid(const uid_t uid)
54259+{
54260+ return 0;
54261+}
54262+
54263+void
54264+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54265+ struct dentry *old_dentry,
54266+ struct dentry *new_dentry,
54267+ struct vfsmount *mnt, const __u8 replace)
54268+{
54269+ return;
54270+}
54271+
54272+int
54273+gr_search_socket(const int family, const int type, const int protocol)
54274+{
54275+ return 1;
54276+}
54277+
54278+int
54279+gr_search_connectbind(const int mode, const struct socket *sock,
54280+ const struct sockaddr_in *addr)
54281+{
54282+ return 0;
54283+}
54284+
54285+void
54286+gr_handle_alertkill(struct task_struct *task)
54287+{
54288+ return;
54289+}
54290+
54291+__u32
54292+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54293+{
54294+ return 1;
54295+}
54296+
54297+__u32
54298+gr_acl_handle_hidden_file(const struct dentry * dentry,
54299+ const struct vfsmount * mnt)
54300+{
54301+ return 1;
54302+}
54303+
54304+__u32
54305+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54306+ int acc_mode)
54307+{
54308+ return 1;
54309+}
54310+
54311+__u32
54312+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54313+{
54314+ return 1;
54315+}
54316+
54317+__u32
54318+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54319+{
54320+ return 1;
54321+}
54322+
54323+int
54324+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54325+ unsigned int *vm_flags)
54326+{
54327+ return 1;
54328+}
54329+
54330+__u32
54331+gr_acl_handle_truncate(const struct dentry * dentry,
54332+ const struct vfsmount * mnt)
54333+{
54334+ return 1;
54335+}
54336+
54337+__u32
54338+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54339+{
54340+ return 1;
54341+}
54342+
54343+__u32
54344+gr_acl_handle_access(const struct dentry * dentry,
54345+ const struct vfsmount * mnt, const int fmode)
54346+{
54347+ return 1;
54348+}
54349+
54350+__u32
54351+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54352+ mode_t mode)
54353+{
54354+ return 1;
54355+}
54356+
54357+__u32
54358+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54359+ mode_t mode)
54360+{
54361+ return 1;
54362+}
54363+
54364+__u32
54365+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54366+{
54367+ return 1;
54368+}
54369+
54370+__u32
54371+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54372+{
54373+ return 1;
54374+}
54375+
54376+void
54377+grsecurity_init(void)
54378+{
54379+ return;
54380+}
54381+
54382+__u32
54383+gr_acl_handle_mknod(const struct dentry * new_dentry,
54384+ const struct dentry * parent_dentry,
54385+ const struct vfsmount * parent_mnt,
54386+ const int mode)
54387+{
54388+ return 1;
54389+}
54390+
54391+__u32
54392+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54393+ const struct dentry * parent_dentry,
54394+ const struct vfsmount * parent_mnt)
54395+{
54396+ return 1;
54397+}
54398+
54399+__u32
54400+gr_acl_handle_symlink(const struct dentry * new_dentry,
54401+ const struct dentry * parent_dentry,
54402+ const struct vfsmount * parent_mnt, const char *from)
54403+{
54404+ return 1;
54405+}
54406+
54407+__u32
54408+gr_acl_handle_link(const struct dentry * new_dentry,
54409+ const struct dentry * parent_dentry,
54410+ const struct vfsmount * parent_mnt,
54411+ const struct dentry * old_dentry,
54412+ const struct vfsmount * old_mnt, const char *to)
54413+{
54414+ return 1;
54415+}
54416+
54417+int
54418+gr_acl_handle_rename(const struct dentry *new_dentry,
54419+ const struct dentry *parent_dentry,
54420+ const struct vfsmount *parent_mnt,
54421+ const struct dentry *old_dentry,
54422+ const struct inode *old_parent_inode,
54423+ const struct vfsmount *old_mnt, const char *newname)
54424+{
54425+ return 0;
54426+}
54427+
54428+int
54429+gr_acl_handle_filldir(const struct file *file, const char *name,
54430+ const int namelen, const ino_t ino)
54431+{
54432+ return 1;
54433+}
54434+
54435+int
54436+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54437+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54438+{
54439+ return 1;
54440+}
54441+
54442+int
54443+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54444+{
54445+ return 0;
54446+}
54447+
54448+int
54449+gr_search_accept(const struct socket *sock)
54450+{
54451+ return 0;
54452+}
54453+
54454+int
54455+gr_search_listen(const struct socket *sock)
54456+{
54457+ return 0;
54458+}
54459+
54460+int
54461+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54462+{
54463+ return 0;
54464+}
54465+
54466+__u32
54467+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54468+{
54469+ return 1;
54470+}
54471+
54472+__u32
54473+gr_acl_handle_creat(const struct dentry * dentry,
54474+ const struct dentry * p_dentry,
54475+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54476+ const int imode)
54477+{
54478+ return 1;
54479+}
54480+
54481+void
54482+gr_acl_handle_exit(void)
54483+{
54484+ return;
54485+}
54486+
54487+int
54488+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54489+{
54490+ return 1;
54491+}
54492+
54493+void
54494+gr_set_role_label(const uid_t uid, const gid_t gid)
54495+{
54496+ return;
54497+}
54498+
54499+int
54500+gr_acl_handle_procpidmem(const struct task_struct *task)
54501+{
54502+ return 0;
54503+}
54504+
54505+int
54506+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54507+{
54508+ return 0;
54509+}
54510+
54511+int
54512+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54513+{
54514+ return 0;
54515+}
54516+
54517+void
54518+gr_set_kernel_label(struct task_struct *task)
54519+{
54520+ return;
54521+}
54522+
54523+int
54524+gr_check_user_change(int real, int effective, int fs)
54525+{
54526+ return 0;
54527+}
54528+
54529+int
54530+gr_check_group_change(int real, int effective, int fs)
54531+{
54532+ return 0;
54533+}
54534+
54535+int gr_acl_enable_at_secure(void)
54536+{
54537+ return 0;
54538+}
54539+
54540+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54541+{
54542+ return dentry->d_inode->i_sb->s_dev;
54543+}
54544+
54545+EXPORT_SYMBOL(gr_learn_resource);
54546+EXPORT_SYMBOL(gr_set_kernel_label);
54547+#ifdef CONFIG_SECURITY
54548+EXPORT_SYMBOL(gr_check_user_change);
54549+EXPORT_SYMBOL(gr_check_group_change);
54550+#endif
54551diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54552new file mode 100644
54553index 0000000..2b05ada
54554--- /dev/null
54555+++ b/grsecurity/grsec_exec.c
54556@@ -0,0 +1,146 @@
54557+#include <linux/kernel.h>
54558+#include <linux/sched.h>
54559+#include <linux/file.h>
54560+#include <linux/binfmts.h>
54561+#include <linux/fs.h>
54562+#include <linux/types.h>
54563+#include <linux/grdefs.h>
54564+#include <linux/grsecurity.h>
54565+#include <linux/grinternal.h>
54566+#include <linux/capability.h>
54567+#include <linux/module.h>
54568+
54569+#include <asm/uaccess.h>
54570+
54571+#ifdef CONFIG_GRKERNSEC_EXECLOG
54572+static char gr_exec_arg_buf[132];
54573+static DEFINE_MUTEX(gr_exec_arg_mutex);
54574+#endif
54575+
54576+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54577+
54578+void
54579+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54580+{
54581+#ifdef CONFIG_GRKERNSEC_EXECLOG
54582+ char *grarg = gr_exec_arg_buf;
54583+ unsigned int i, x, execlen = 0;
54584+ char c;
54585+
54586+ if (!((grsec_enable_execlog && grsec_enable_group &&
54587+ in_group_p(grsec_audit_gid))
54588+ || (grsec_enable_execlog && !grsec_enable_group)))
54589+ return;
54590+
54591+ mutex_lock(&gr_exec_arg_mutex);
54592+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
54593+
54594+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
54595+ const char __user *p;
54596+ unsigned int len;
54597+
54598+ p = get_user_arg_ptr(argv, i);
54599+ if (IS_ERR(p))
54600+ goto log;
54601+
54602+ len = strnlen_user(p, 128 - execlen);
54603+ if (len > 128 - execlen)
54604+ len = 128 - execlen;
54605+ else if (len > 0)
54606+ len--;
54607+ if (copy_from_user(grarg + execlen, p, len))
54608+ goto log;
54609+
54610+ /* rewrite unprintable characters */
54611+ for (x = 0; x < len; x++) {
54612+ c = *(grarg + execlen + x);
54613+ if (c < 32 || c > 126)
54614+ *(grarg + execlen + x) = ' ';
54615+ }
54616+
54617+ execlen += len;
54618+ *(grarg + execlen) = ' ';
54619+ *(grarg + execlen + 1) = '\0';
54620+ execlen++;
54621+ }
54622+
54623+ log:
54624+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54625+ bprm->file->f_path.mnt, grarg);
54626+ mutex_unlock(&gr_exec_arg_mutex);
54627+#endif
54628+ return;
54629+}
54630+
54631+#ifdef CONFIG_GRKERNSEC
54632+extern int gr_acl_is_capable(const int cap);
54633+extern int gr_acl_is_capable_nolog(const int cap);
54634+extern int gr_chroot_is_capable(const int cap);
54635+extern int gr_chroot_is_capable_nolog(const int cap);
54636+#endif
54637+
54638+const char *captab_log[] = {
54639+ "CAP_CHOWN",
54640+ "CAP_DAC_OVERRIDE",
54641+ "CAP_DAC_READ_SEARCH",
54642+ "CAP_FOWNER",
54643+ "CAP_FSETID",
54644+ "CAP_KILL",
54645+ "CAP_SETGID",
54646+ "CAP_SETUID",
54647+ "CAP_SETPCAP",
54648+ "CAP_LINUX_IMMUTABLE",
54649+ "CAP_NET_BIND_SERVICE",
54650+ "CAP_NET_BROADCAST",
54651+ "CAP_NET_ADMIN",
54652+ "CAP_NET_RAW",
54653+ "CAP_IPC_LOCK",
54654+ "CAP_IPC_OWNER",
54655+ "CAP_SYS_MODULE",
54656+ "CAP_SYS_RAWIO",
54657+ "CAP_SYS_CHROOT",
54658+ "CAP_SYS_PTRACE",
54659+ "CAP_SYS_PACCT",
54660+ "CAP_SYS_ADMIN",
54661+ "CAP_SYS_BOOT",
54662+ "CAP_SYS_NICE",
54663+ "CAP_SYS_RESOURCE",
54664+ "CAP_SYS_TIME",
54665+ "CAP_SYS_TTY_CONFIG",
54666+ "CAP_MKNOD",
54667+ "CAP_LEASE",
54668+ "CAP_AUDIT_WRITE",
54669+ "CAP_AUDIT_CONTROL",
54670+ "CAP_SETFCAP",
54671+ "CAP_MAC_OVERRIDE",
54672+ "CAP_MAC_ADMIN",
54673+ "CAP_SYSLOG",
54674+ "CAP_WAKE_ALARM"
54675+};
54676+
54677+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54678+
54679+int gr_is_capable(const int cap)
54680+{
54681+#ifdef CONFIG_GRKERNSEC
54682+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54683+ return 1;
54684+ return 0;
54685+#else
54686+ return 1;
54687+#endif
54688+}
54689+
54690+int gr_is_capable_nolog(const int cap)
54691+{
54692+#ifdef CONFIG_GRKERNSEC
54693+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54694+ return 1;
54695+ return 0;
54696+#else
54697+ return 1;
54698+#endif
54699+}
54700+
54701+EXPORT_SYMBOL(gr_is_capable);
54702+EXPORT_SYMBOL(gr_is_capable_nolog);
54703diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
54704new file mode 100644
54705index 0000000..d3ee748
54706--- /dev/null
54707+++ b/grsecurity/grsec_fifo.c
54708@@ -0,0 +1,24 @@
54709+#include <linux/kernel.h>
54710+#include <linux/sched.h>
54711+#include <linux/fs.h>
54712+#include <linux/file.h>
54713+#include <linux/grinternal.h>
54714+
54715+int
54716+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54717+ const struct dentry *dir, const int flag, const int acc_mode)
54718+{
54719+#ifdef CONFIG_GRKERNSEC_FIFO
54720+ const struct cred *cred = current_cred();
54721+
54722+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54723+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54724+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54725+ (cred->fsuid != dentry->d_inode->i_uid)) {
54726+ if (!inode_permission(dentry->d_inode, acc_mode))
54727+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54728+ return -EACCES;
54729+ }
54730+#endif
54731+ return 0;
54732+}
54733diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
54734new file mode 100644
54735index 0000000..8ca18bf
54736--- /dev/null
54737+++ b/grsecurity/grsec_fork.c
54738@@ -0,0 +1,23 @@
54739+#include <linux/kernel.h>
54740+#include <linux/sched.h>
54741+#include <linux/grsecurity.h>
54742+#include <linux/grinternal.h>
54743+#include <linux/errno.h>
54744+
54745+void
54746+gr_log_forkfail(const int retval)
54747+{
54748+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54749+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54750+ switch (retval) {
54751+ case -EAGAIN:
54752+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54753+ break;
54754+ case -ENOMEM:
54755+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54756+ break;
54757+ }
54758+ }
54759+#endif
54760+ return;
54761+}
54762diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
54763new file mode 100644
54764index 0000000..01ddde4
54765--- /dev/null
54766+++ b/grsecurity/grsec_init.c
54767@@ -0,0 +1,277 @@
54768+#include <linux/kernel.h>
54769+#include <linux/sched.h>
54770+#include <linux/mm.h>
54771+#include <linux/gracl.h>
54772+#include <linux/slab.h>
54773+#include <linux/vmalloc.h>
54774+#include <linux/percpu.h>
54775+#include <linux/module.h>
54776+
54777+int grsec_enable_ptrace_readexec;
54778+int grsec_enable_setxid;
54779+int grsec_enable_brute;
54780+int grsec_enable_link;
54781+int grsec_enable_dmesg;
54782+int grsec_enable_harden_ptrace;
54783+int grsec_enable_fifo;
54784+int grsec_enable_execlog;
54785+int grsec_enable_signal;
54786+int grsec_enable_forkfail;
54787+int grsec_enable_audit_ptrace;
54788+int grsec_enable_time;
54789+int grsec_enable_audit_textrel;
54790+int grsec_enable_group;
54791+int grsec_audit_gid;
54792+int grsec_enable_chdir;
54793+int grsec_enable_mount;
54794+int grsec_enable_rofs;
54795+int grsec_enable_chroot_findtask;
54796+int grsec_enable_chroot_mount;
54797+int grsec_enable_chroot_shmat;
54798+int grsec_enable_chroot_fchdir;
54799+int grsec_enable_chroot_double;
54800+int grsec_enable_chroot_pivot;
54801+int grsec_enable_chroot_chdir;
54802+int grsec_enable_chroot_chmod;
54803+int grsec_enable_chroot_mknod;
54804+int grsec_enable_chroot_nice;
54805+int grsec_enable_chroot_execlog;
54806+int grsec_enable_chroot_caps;
54807+int grsec_enable_chroot_sysctl;
54808+int grsec_enable_chroot_unix;
54809+int grsec_enable_tpe;
54810+int grsec_tpe_gid;
54811+int grsec_enable_blackhole;
54812+#ifdef CONFIG_IPV6_MODULE
54813+EXPORT_SYMBOL(grsec_enable_blackhole);
54814+#endif
54815+int grsec_lastack_retries;
54816+int grsec_enable_tpe_all;
54817+int grsec_enable_tpe_invert;
54818+int grsec_enable_socket_all;
54819+int grsec_socket_all_gid;
54820+int grsec_enable_socket_client;
54821+int grsec_socket_client_gid;
54822+int grsec_enable_socket_server;
54823+int grsec_socket_server_gid;
54824+int grsec_resource_logging;
54825+int grsec_disable_privio;
54826+int grsec_enable_log_rwxmaps;
54827+int grsec_lock;
54828+
54829+DEFINE_SPINLOCK(grsec_alert_lock);
54830+unsigned long grsec_alert_wtime = 0;
54831+unsigned long grsec_alert_fyet = 0;
54832+
54833+DEFINE_SPINLOCK(grsec_audit_lock);
54834+
54835+DEFINE_RWLOCK(grsec_exec_file_lock);
54836+
54837+char *gr_shared_page[4];
54838+
54839+char *gr_alert_log_fmt;
54840+char *gr_audit_log_fmt;
54841+char *gr_alert_log_buf;
54842+char *gr_audit_log_buf;
54843+
54844+extern struct gr_arg *gr_usermode;
54845+extern unsigned char *gr_system_salt;
54846+extern unsigned char *gr_system_sum;
54847+
54848+void __init
54849+grsecurity_init(void)
54850+{
54851+ int j;
54852+ /* create the per-cpu shared pages */
54853+
54854+#ifdef CONFIG_X86
54855+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54856+#endif
54857+
54858+ for (j = 0; j < 4; j++) {
54859+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54860+ if (gr_shared_page[j] == NULL) {
54861+ panic("Unable to allocate grsecurity shared page");
54862+ return;
54863+ }
54864+ }
54865+
54866+ /* allocate log buffers */
54867+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54868+ if (!gr_alert_log_fmt) {
54869+ panic("Unable to allocate grsecurity alert log format buffer");
54870+ return;
54871+ }
54872+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54873+ if (!gr_audit_log_fmt) {
54874+ panic("Unable to allocate grsecurity audit log format buffer");
54875+ return;
54876+ }
54877+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54878+ if (!gr_alert_log_buf) {
54879+ panic("Unable to allocate grsecurity alert log buffer");
54880+ return;
54881+ }
54882+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54883+ if (!gr_audit_log_buf) {
54884+ panic("Unable to allocate grsecurity audit log buffer");
54885+ return;
54886+ }
54887+
54888+ /* allocate memory for authentication structure */
54889+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54890+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54891+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54892+
54893+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54894+ panic("Unable to allocate grsecurity authentication structure");
54895+ return;
54896+ }
54897+
54898+
54899+#ifdef CONFIG_GRKERNSEC_IO
54900+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54901+ grsec_disable_privio = 1;
54902+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54903+ grsec_disable_privio = 1;
54904+#else
54905+ grsec_disable_privio = 0;
54906+#endif
54907+#endif
54908+
54909+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54910+ /* for backward compatibility, tpe_invert always defaults to on if
54911+ enabled in the kernel
54912+ */
54913+ grsec_enable_tpe_invert = 1;
54914+#endif
54915+
54916+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54917+#ifndef CONFIG_GRKERNSEC_SYSCTL
54918+ grsec_lock = 1;
54919+#endif
54920+
54921+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54922+ grsec_enable_audit_textrel = 1;
54923+#endif
54924+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54925+ grsec_enable_log_rwxmaps = 1;
54926+#endif
54927+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54928+ grsec_enable_group = 1;
54929+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54930+#endif
54931+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
54932+ grsec_enable_ptrace_readexec = 1;
54933+#endif
54934+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54935+ grsec_enable_chdir = 1;
54936+#endif
54937+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54938+ grsec_enable_harden_ptrace = 1;
54939+#endif
54940+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54941+ grsec_enable_mount = 1;
54942+#endif
54943+#ifdef CONFIG_GRKERNSEC_LINK
54944+ grsec_enable_link = 1;
54945+#endif
54946+#ifdef CONFIG_GRKERNSEC_BRUTE
54947+ grsec_enable_brute = 1;
54948+#endif
54949+#ifdef CONFIG_GRKERNSEC_DMESG
54950+ grsec_enable_dmesg = 1;
54951+#endif
54952+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54953+ grsec_enable_blackhole = 1;
54954+ grsec_lastack_retries = 4;
54955+#endif
54956+#ifdef CONFIG_GRKERNSEC_FIFO
54957+ grsec_enable_fifo = 1;
54958+#endif
54959+#ifdef CONFIG_GRKERNSEC_EXECLOG
54960+ grsec_enable_execlog = 1;
54961+#endif
54962+#ifdef CONFIG_GRKERNSEC_SETXID
54963+ grsec_enable_setxid = 1;
54964+#endif
54965+#ifdef CONFIG_GRKERNSEC_SIGNAL
54966+ grsec_enable_signal = 1;
54967+#endif
54968+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54969+ grsec_enable_forkfail = 1;
54970+#endif
54971+#ifdef CONFIG_GRKERNSEC_TIME
54972+ grsec_enable_time = 1;
54973+#endif
54974+#ifdef CONFIG_GRKERNSEC_RESLOG
54975+ grsec_resource_logging = 1;
54976+#endif
54977+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54978+ grsec_enable_chroot_findtask = 1;
54979+#endif
54980+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54981+ grsec_enable_chroot_unix = 1;
54982+#endif
54983+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54984+ grsec_enable_chroot_mount = 1;
54985+#endif
54986+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54987+ grsec_enable_chroot_fchdir = 1;
54988+#endif
54989+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54990+ grsec_enable_chroot_shmat = 1;
54991+#endif
54992+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54993+ grsec_enable_audit_ptrace = 1;
54994+#endif
54995+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54996+ grsec_enable_chroot_double = 1;
54997+#endif
54998+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54999+ grsec_enable_chroot_pivot = 1;
55000+#endif
55001+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55002+ grsec_enable_chroot_chdir = 1;
55003+#endif
55004+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55005+ grsec_enable_chroot_chmod = 1;
55006+#endif
55007+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55008+ grsec_enable_chroot_mknod = 1;
55009+#endif
55010+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55011+ grsec_enable_chroot_nice = 1;
55012+#endif
55013+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55014+ grsec_enable_chroot_execlog = 1;
55015+#endif
55016+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55017+ grsec_enable_chroot_caps = 1;
55018+#endif
55019+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55020+ grsec_enable_chroot_sysctl = 1;
55021+#endif
55022+#ifdef CONFIG_GRKERNSEC_TPE
55023+ grsec_enable_tpe = 1;
55024+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55025+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55026+ grsec_enable_tpe_all = 1;
55027+#endif
55028+#endif
55029+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55030+ grsec_enable_socket_all = 1;
55031+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55032+#endif
55033+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55034+ grsec_enable_socket_client = 1;
55035+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55036+#endif
55037+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55038+ grsec_enable_socket_server = 1;
55039+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55040+#endif
55041+#endif
55042+
55043+ return;
55044+}
55045diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55046new file mode 100644
55047index 0000000..3efe141
55048--- /dev/null
55049+++ b/grsecurity/grsec_link.c
55050@@ -0,0 +1,43 @@
55051+#include <linux/kernel.h>
55052+#include <linux/sched.h>
55053+#include <linux/fs.h>
55054+#include <linux/file.h>
55055+#include <linux/grinternal.h>
55056+
55057+int
55058+gr_handle_follow_link(const struct inode *parent,
55059+ const struct inode *inode,
55060+ const struct dentry *dentry, const struct vfsmount *mnt)
55061+{
55062+#ifdef CONFIG_GRKERNSEC_LINK
55063+ const struct cred *cred = current_cred();
55064+
55065+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55066+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55067+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55068+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55069+ return -EACCES;
55070+ }
55071+#endif
55072+ return 0;
55073+}
55074+
55075+int
55076+gr_handle_hardlink(const struct dentry *dentry,
55077+ const struct vfsmount *mnt,
55078+ struct inode *inode, const int mode, const char *to)
55079+{
55080+#ifdef CONFIG_GRKERNSEC_LINK
55081+ const struct cred *cred = current_cred();
55082+
55083+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55084+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55085+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55086+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55087+ !capable(CAP_FOWNER) && cred->uid) {
55088+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55089+ return -EPERM;
55090+ }
55091+#endif
55092+ return 0;
55093+}
55094diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55095new file mode 100644
55096index 0000000..a45d2e9
55097--- /dev/null
55098+++ b/grsecurity/grsec_log.c
55099@@ -0,0 +1,322 @@
55100+#include <linux/kernel.h>
55101+#include <linux/sched.h>
55102+#include <linux/file.h>
55103+#include <linux/tty.h>
55104+#include <linux/fs.h>
55105+#include <linux/grinternal.h>
55106+
55107+#ifdef CONFIG_TREE_PREEMPT_RCU
55108+#define DISABLE_PREEMPT() preempt_disable()
55109+#define ENABLE_PREEMPT() preempt_enable()
55110+#else
55111+#define DISABLE_PREEMPT()
55112+#define ENABLE_PREEMPT()
55113+#endif
55114+
55115+#define BEGIN_LOCKS(x) \
55116+ DISABLE_PREEMPT(); \
55117+ rcu_read_lock(); \
55118+ read_lock(&tasklist_lock); \
55119+ read_lock(&grsec_exec_file_lock); \
55120+ if (x != GR_DO_AUDIT) \
55121+ spin_lock(&grsec_alert_lock); \
55122+ else \
55123+ spin_lock(&grsec_audit_lock)
55124+
55125+#define END_LOCKS(x) \
55126+ if (x != GR_DO_AUDIT) \
55127+ spin_unlock(&grsec_alert_lock); \
55128+ else \
55129+ spin_unlock(&grsec_audit_lock); \
55130+ read_unlock(&grsec_exec_file_lock); \
55131+ read_unlock(&tasklist_lock); \
55132+ rcu_read_unlock(); \
55133+ ENABLE_PREEMPT(); \
55134+ if (x == GR_DONT_AUDIT) \
55135+ gr_handle_alertkill(current)
55136+
55137+enum {
55138+ FLOODING,
55139+ NO_FLOODING
55140+};
55141+
55142+extern char *gr_alert_log_fmt;
55143+extern char *gr_audit_log_fmt;
55144+extern char *gr_alert_log_buf;
55145+extern char *gr_audit_log_buf;
55146+
55147+static int gr_log_start(int audit)
55148+{
55149+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55150+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55151+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55152+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55153+ unsigned long curr_secs = get_seconds();
55154+
55155+ if (audit == GR_DO_AUDIT)
55156+ goto set_fmt;
55157+
55158+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55159+ grsec_alert_wtime = curr_secs;
55160+ grsec_alert_fyet = 0;
55161+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55162+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55163+ grsec_alert_fyet++;
55164+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55165+ grsec_alert_wtime = curr_secs;
55166+ grsec_alert_fyet++;
55167+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55168+ return FLOODING;
55169+ }
55170+ else return FLOODING;
55171+
55172+set_fmt:
55173+#endif
55174+ memset(buf, 0, PAGE_SIZE);
55175+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55176+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55177+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55178+ } else if (current->signal->curr_ip) {
55179+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55180+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55181+ } else if (gr_acl_is_enabled()) {
55182+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55183+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55184+ } else {
55185+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55186+ strcpy(buf, fmt);
55187+ }
55188+
55189+ return NO_FLOODING;
55190+}
55191+
55192+static void gr_log_middle(int audit, const char *msg, va_list ap)
55193+ __attribute__ ((format (printf, 2, 0)));
55194+
55195+static void gr_log_middle(int audit, const char *msg, va_list ap)
55196+{
55197+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55198+ unsigned int len = strlen(buf);
55199+
55200+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55201+
55202+ return;
55203+}
55204+
55205+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55206+ __attribute__ ((format (printf, 2, 3)));
55207+
55208+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55209+{
55210+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55211+ unsigned int len = strlen(buf);
55212+ va_list ap;
55213+
55214+ va_start(ap, msg);
55215+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55216+ va_end(ap);
55217+
55218+ return;
55219+}
55220+
55221+static void gr_log_end(int audit, int append_default)
55222+{
55223+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55224+
55225+ if (append_default) {
55226+ unsigned int len = strlen(buf);
55227+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55228+ }
55229+
55230+ printk("%s\n", buf);
55231+
55232+ return;
55233+}
55234+
55235+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55236+{
55237+ int logtype;
55238+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55239+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55240+ void *voidptr = NULL;
55241+ int num1 = 0, num2 = 0;
55242+ unsigned long ulong1 = 0, ulong2 = 0;
55243+ struct dentry *dentry = NULL;
55244+ struct vfsmount *mnt = NULL;
55245+ struct file *file = NULL;
55246+ struct task_struct *task = NULL;
55247+ const struct cred *cred, *pcred;
55248+ va_list ap;
55249+
55250+ BEGIN_LOCKS(audit);
55251+ logtype = gr_log_start(audit);
55252+ if (logtype == FLOODING) {
55253+ END_LOCKS(audit);
55254+ return;
55255+ }
55256+ va_start(ap, argtypes);
55257+ switch (argtypes) {
55258+ case GR_TTYSNIFF:
55259+ task = va_arg(ap, struct task_struct *);
55260+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55261+ break;
55262+ case GR_SYSCTL_HIDDEN:
55263+ str1 = va_arg(ap, char *);
55264+ gr_log_middle_varargs(audit, msg, result, str1);
55265+ break;
55266+ case GR_RBAC:
55267+ dentry = va_arg(ap, struct dentry *);
55268+ mnt = va_arg(ap, struct vfsmount *);
55269+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55270+ break;
55271+ case GR_RBAC_STR:
55272+ dentry = va_arg(ap, struct dentry *);
55273+ mnt = va_arg(ap, struct vfsmount *);
55274+ str1 = va_arg(ap, char *);
55275+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55276+ break;
55277+ case GR_STR_RBAC:
55278+ str1 = va_arg(ap, char *);
55279+ dentry = va_arg(ap, struct dentry *);
55280+ mnt = va_arg(ap, struct vfsmount *);
55281+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55282+ break;
55283+ case GR_RBAC_MODE2:
55284+ dentry = va_arg(ap, struct dentry *);
55285+ mnt = va_arg(ap, struct vfsmount *);
55286+ str1 = va_arg(ap, char *);
55287+ str2 = va_arg(ap, char *);
55288+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55289+ break;
55290+ case GR_RBAC_MODE3:
55291+ dentry = va_arg(ap, struct dentry *);
55292+ mnt = va_arg(ap, struct vfsmount *);
55293+ str1 = va_arg(ap, char *);
55294+ str2 = va_arg(ap, char *);
55295+ str3 = va_arg(ap, char *);
55296+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55297+ break;
55298+ case GR_FILENAME:
55299+ dentry = va_arg(ap, struct dentry *);
55300+ mnt = va_arg(ap, struct vfsmount *);
55301+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55302+ break;
55303+ case GR_STR_FILENAME:
55304+ str1 = va_arg(ap, char *);
55305+ dentry = va_arg(ap, struct dentry *);
55306+ mnt = va_arg(ap, struct vfsmount *);
55307+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55308+ break;
55309+ case GR_FILENAME_STR:
55310+ dentry = va_arg(ap, struct dentry *);
55311+ mnt = va_arg(ap, struct vfsmount *);
55312+ str1 = va_arg(ap, char *);
55313+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55314+ break;
55315+ case GR_FILENAME_TWO_INT:
55316+ dentry = va_arg(ap, struct dentry *);
55317+ mnt = va_arg(ap, struct vfsmount *);
55318+ num1 = va_arg(ap, int);
55319+ num2 = va_arg(ap, int);
55320+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55321+ break;
55322+ case GR_FILENAME_TWO_INT_STR:
55323+ dentry = va_arg(ap, struct dentry *);
55324+ mnt = va_arg(ap, struct vfsmount *);
55325+ num1 = va_arg(ap, int);
55326+ num2 = va_arg(ap, int);
55327+ str1 = va_arg(ap, char *);
55328+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55329+ break;
55330+ case GR_TEXTREL:
55331+ file = va_arg(ap, struct file *);
55332+ ulong1 = va_arg(ap, unsigned long);
55333+ ulong2 = va_arg(ap, unsigned long);
55334+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55335+ break;
55336+ case GR_PTRACE:
55337+ task = va_arg(ap, struct task_struct *);
55338+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55339+ break;
55340+ case GR_RESOURCE:
55341+ task = va_arg(ap, struct task_struct *);
55342+ cred = __task_cred(task);
55343+ pcred = __task_cred(task->real_parent);
55344+ ulong1 = va_arg(ap, unsigned long);
55345+ str1 = va_arg(ap, char *);
55346+ ulong2 = va_arg(ap, unsigned long);
55347+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55348+ break;
55349+ case GR_CAP:
55350+ task = va_arg(ap, struct task_struct *);
55351+ cred = __task_cred(task);
55352+ pcred = __task_cred(task->real_parent);
55353+ str1 = va_arg(ap, char *);
55354+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55355+ break;
55356+ case GR_SIG:
55357+ str1 = va_arg(ap, char *);
55358+ voidptr = va_arg(ap, void *);
55359+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55360+ break;
55361+ case GR_SIG2:
55362+ task = va_arg(ap, struct task_struct *);
55363+ cred = __task_cred(task);
55364+ pcred = __task_cred(task->real_parent);
55365+ num1 = va_arg(ap, int);
55366+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55367+ break;
55368+ case GR_CRASH1:
55369+ task = va_arg(ap, struct task_struct *);
55370+ cred = __task_cred(task);
55371+ pcred = __task_cred(task->real_parent);
55372+ ulong1 = va_arg(ap, unsigned long);
55373+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55374+ break;
55375+ case GR_CRASH2:
55376+ task = va_arg(ap, struct task_struct *);
55377+ cred = __task_cred(task);
55378+ pcred = __task_cred(task->real_parent);
55379+ ulong1 = va_arg(ap, unsigned long);
55380+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55381+ break;
55382+ case GR_RWXMAP:
55383+ file = va_arg(ap, struct file *);
55384+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55385+ break;
55386+ case GR_PSACCT:
55387+ {
55388+ unsigned int wday, cday;
55389+ __u8 whr, chr;
55390+ __u8 wmin, cmin;
55391+ __u8 wsec, csec;
55392+ char cur_tty[64] = { 0 };
55393+ char parent_tty[64] = { 0 };
55394+
55395+ task = va_arg(ap, struct task_struct *);
55396+ wday = va_arg(ap, unsigned int);
55397+ cday = va_arg(ap, unsigned int);
55398+ whr = va_arg(ap, int);
55399+ chr = va_arg(ap, int);
55400+ wmin = va_arg(ap, int);
55401+ cmin = va_arg(ap, int);
55402+ wsec = va_arg(ap, int);
55403+ csec = va_arg(ap, int);
55404+ ulong1 = va_arg(ap, unsigned long);
55405+ cred = __task_cred(task);
55406+ pcred = __task_cred(task->real_parent);
55407+
55408+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55409+ }
55410+ break;
55411+ default:
55412+ gr_log_middle(audit, msg, ap);
55413+ }
55414+ va_end(ap);
55415+ // these don't need DEFAULTSECARGS printed on the end
55416+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55417+ gr_log_end(audit, 0);
55418+ else
55419+ gr_log_end(audit, 1);
55420+ END_LOCKS(audit);
55421+}
55422diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55423new file mode 100644
55424index 0000000..6c0416b
55425--- /dev/null
55426+++ b/grsecurity/grsec_mem.c
55427@@ -0,0 +1,33 @@
55428+#include <linux/kernel.h>
55429+#include <linux/sched.h>
55430+#include <linux/mm.h>
55431+#include <linux/mman.h>
55432+#include <linux/grinternal.h>
55433+
55434+void
55435+gr_handle_ioperm(void)
55436+{
55437+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55438+ return;
55439+}
55440+
55441+void
55442+gr_handle_iopl(void)
55443+{
55444+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55445+ return;
55446+}
55447+
55448+void
55449+gr_handle_mem_readwrite(u64 from, u64 to)
55450+{
55451+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55452+ return;
55453+}
55454+
55455+void
55456+gr_handle_vm86(void)
55457+{
55458+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55459+ return;
55460+}
55461diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55462new file mode 100644
55463index 0000000..2131422
55464--- /dev/null
55465+++ b/grsecurity/grsec_mount.c
55466@@ -0,0 +1,62 @@
55467+#include <linux/kernel.h>
55468+#include <linux/sched.h>
55469+#include <linux/mount.h>
55470+#include <linux/grsecurity.h>
55471+#include <linux/grinternal.h>
55472+
55473+void
55474+gr_log_remount(const char *devname, const int retval)
55475+{
55476+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55477+ if (grsec_enable_mount && (retval >= 0))
55478+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55479+#endif
55480+ return;
55481+}
55482+
55483+void
55484+gr_log_unmount(const char *devname, const int retval)
55485+{
55486+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55487+ if (grsec_enable_mount && (retval >= 0))
55488+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55489+#endif
55490+ return;
55491+}
55492+
55493+void
55494+gr_log_mount(const char *from, const char *to, const int retval)
55495+{
55496+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55497+ if (grsec_enable_mount && (retval >= 0))
55498+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55499+#endif
55500+ return;
55501+}
55502+
55503+int
55504+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55505+{
55506+#ifdef CONFIG_GRKERNSEC_ROFS
55507+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55508+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55509+ return -EPERM;
55510+ } else
55511+ return 0;
55512+#endif
55513+ return 0;
55514+}
55515+
55516+int
55517+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55518+{
55519+#ifdef CONFIG_GRKERNSEC_ROFS
55520+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55521+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55522+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55523+ return -EPERM;
55524+ } else
55525+ return 0;
55526+#endif
55527+ return 0;
55528+}
55529diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55530new file mode 100644
55531index 0000000..a3b12a0
55532--- /dev/null
55533+++ b/grsecurity/grsec_pax.c
55534@@ -0,0 +1,36 @@
55535+#include <linux/kernel.h>
55536+#include <linux/sched.h>
55537+#include <linux/mm.h>
55538+#include <linux/file.h>
55539+#include <linux/grinternal.h>
55540+#include <linux/grsecurity.h>
55541+
55542+void
55543+gr_log_textrel(struct vm_area_struct * vma)
55544+{
55545+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55546+ if (grsec_enable_audit_textrel)
55547+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55548+#endif
55549+ return;
55550+}
55551+
55552+void
55553+gr_log_rwxmmap(struct file *file)
55554+{
55555+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55556+ if (grsec_enable_log_rwxmaps)
55557+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55558+#endif
55559+ return;
55560+}
55561+
55562+void
55563+gr_log_rwxmprotect(struct file *file)
55564+{
55565+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55566+ if (grsec_enable_log_rwxmaps)
55567+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55568+#endif
55569+ return;
55570+}
55571diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55572new file mode 100644
55573index 0000000..f7f29aa
55574--- /dev/null
55575+++ b/grsecurity/grsec_ptrace.c
55576@@ -0,0 +1,30 @@
55577+#include <linux/kernel.h>
55578+#include <linux/sched.h>
55579+#include <linux/grinternal.h>
55580+#include <linux/security.h>
55581+
55582+void
55583+gr_audit_ptrace(struct task_struct *task)
55584+{
55585+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55586+ if (grsec_enable_audit_ptrace)
55587+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55588+#endif
55589+ return;
55590+}
55591+
55592+int
55593+gr_ptrace_readexec(struct file *file, int unsafe_flags)
55594+{
55595+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55596+ const struct dentry *dentry = file->f_path.dentry;
55597+ const struct vfsmount *mnt = file->f_path.mnt;
55598+
55599+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
55600+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
55601+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
55602+ return -EACCES;
55603+ }
55604+#endif
55605+ return 0;
55606+}
55607diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
55608new file mode 100644
55609index 0000000..7a5b2de
55610--- /dev/null
55611+++ b/grsecurity/grsec_sig.c
55612@@ -0,0 +1,207 @@
55613+#include <linux/kernel.h>
55614+#include <linux/sched.h>
55615+#include <linux/delay.h>
55616+#include <linux/grsecurity.h>
55617+#include <linux/grinternal.h>
55618+#include <linux/hardirq.h>
55619+
55620+char *signames[] = {
55621+ [SIGSEGV] = "Segmentation fault",
55622+ [SIGILL] = "Illegal instruction",
55623+ [SIGABRT] = "Abort",
55624+ [SIGBUS] = "Invalid alignment/Bus error"
55625+};
55626+
55627+void
55628+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55629+{
55630+#ifdef CONFIG_GRKERNSEC_SIGNAL
55631+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55632+ (sig == SIGABRT) || (sig == SIGBUS))) {
55633+ if (t->pid == current->pid) {
55634+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55635+ } else {
55636+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55637+ }
55638+ }
55639+#endif
55640+ return;
55641+}
55642+
55643+int
55644+gr_handle_signal(const struct task_struct *p, const int sig)
55645+{
55646+#ifdef CONFIG_GRKERNSEC
55647+ /* ignore the 0 signal for protected task checks */
55648+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
55649+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55650+ return -EPERM;
55651+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55652+ return -EPERM;
55653+ }
55654+#endif
55655+ return 0;
55656+}
55657+
55658+#ifdef CONFIG_GRKERNSEC
55659+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55660+
55661+int gr_fake_force_sig(int sig, struct task_struct *t)
55662+{
55663+ unsigned long int flags;
55664+ int ret, blocked, ignored;
55665+ struct k_sigaction *action;
55666+
55667+ spin_lock_irqsave(&t->sighand->siglock, flags);
55668+ action = &t->sighand->action[sig-1];
55669+ ignored = action->sa.sa_handler == SIG_IGN;
55670+ blocked = sigismember(&t->blocked, sig);
55671+ if (blocked || ignored) {
55672+ action->sa.sa_handler = SIG_DFL;
55673+ if (blocked) {
55674+ sigdelset(&t->blocked, sig);
55675+ recalc_sigpending_and_wake(t);
55676+ }
55677+ }
55678+ if (action->sa.sa_handler == SIG_DFL)
55679+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
55680+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
55681+
55682+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
55683+
55684+ return ret;
55685+}
55686+#endif
55687+
55688+#ifdef CONFIG_GRKERNSEC_BRUTE
55689+#define GR_USER_BAN_TIME (15 * 60)
55690+
55691+static int __get_dumpable(unsigned long mm_flags)
55692+{
55693+ int ret;
55694+
55695+ ret = mm_flags & MMF_DUMPABLE_MASK;
55696+ return (ret >= 2) ? 2 : ret;
55697+}
55698+#endif
55699+
55700+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
55701+{
55702+#ifdef CONFIG_GRKERNSEC_BRUTE
55703+ uid_t uid = 0;
55704+
55705+ if (!grsec_enable_brute)
55706+ return;
55707+
55708+ rcu_read_lock();
55709+ read_lock(&tasklist_lock);
55710+ read_lock(&grsec_exec_file_lock);
55711+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55712+ p->real_parent->brute = 1;
55713+ else {
55714+ const struct cred *cred = __task_cred(p), *cred2;
55715+ struct task_struct *tsk, *tsk2;
55716+
55717+ if (!__get_dumpable(mm_flags) && cred->uid) {
55718+ struct user_struct *user;
55719+
55720+ uid = cred->uid;
55721+
55722+ /* this is put upon execution past expiration */
55723+ user = find_user(uid);
55724+ if (user == NULL)
55725+ goto unlock;
55726+ user->banned = 1;
55727+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55728+ if (user->ban_expires == ~0UL)
55729+ user->ban_expires--;
55730+
55731+ do_each_thread(tsk2, tsk) {
55732+ cred2 = __task_cred(tsk);
55733+ if (tsk != p && cred2->uid == uid)
55734+ gr_fake_force_sig(SIGKILL, tsk);
55735+ } while_each_thread(tsk2, tsk);
55736+ }
55737+ }
55738+unlock:
55739+ read_unlock(&grsec_exec_file_lock);
55740+ read_unlock(&tasklist_lock);
55741+ rcu_read_unlock();
55742+
55743+ if (uid)
55744+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55745+
55746+#endif
55747+ return;
55748+}
55749+
55750+void gr_handle_brute_check(void)
55751+{
55752+#ifdef CONFIG_GRKERNSEC_BRUTE
55753+ if (current->brute)
55754+ msleep(30 * 1000);
55755+#endif
55756+ return;
55757+}
55758+
55759+void gr_handle_kernel_exploit(void)
55760+{
55761+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55762+ const struct cred *cred;
55763+ struct task_struct *tsk, *tsk2;
55764+ struct user_struct *user;
55765+ uid_t uid;
55766+
55767+ if (in_irq() || in_serving_softirq() || in_nmi())
55768+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55769+
55770+ uid = current_uid();
55771+
55772+ if (uid == 0)
55773+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
55774+ else {
55775+ /* kill all the processes of this user, hold a reference
55776+ to their creds struct, and prevent them from creating
55777+ another process until system reset
55778+ */
55779+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55780+ /* we intentionally leak this ref */
55781+ user = get_uid(current->cred->user);
55782+ if (user) {
55783+ user->banned = 1;
55784+ user->ban_expires = ~0UL;
55785+ }
55786+
55787+ read_lock(&tasklist_lock);
55788+ do_each_thread(tsk2, tsk) {
55789+ cred = __task_cred(tsk);
55790+ if (cred->uid == uid)
55791+ gr_fake_force_sig(SIGKILL, tsk);
55792+ } while_each_thread(tsk2, tsk);
55793+ read_unlock(&tasklist_lock);
55794+ }
55795+#endif
55796+}
55797+
55798+int __gr_process_user_ban(struct user_struct *user)
55799+{
55800+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55801+ if (unlikely(user->banned)) {
55802+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55803+ user->banned = 0;
55804+ user->ban_expires = 0;
55805+ free_uid(user);
55806+ } else
55807+ return -EPERM;
55808+ }
55809+#endif
55810+ return 0;
55811+}
55812+
55813+int gr_process_user_ban(void)
55814+{
55815+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55816+ return __gr_process_user_ban(current->cred->user);
55817+#endif
55818+ return 0;
55819+}
55820diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
55821new file mode 100644
55822index 0000000..4030d57
55823--- /dev/null
55824+++ b/grsecurity/grsec_sock.c
55825@@ -0,0 +1,244 @@
55826+#include <linux/kernel.h>
55827+#include <linux/module.h>
55828+#include <linux/sched.h>
55829+#include <linux/file.h>
55830+#include <linux/net.h>
55831+#include <linux/in.h>
55832+#include <linux/ip.h>
55833+#include <net/sock.h>
55834+#include <net/inet_sock.h>
55835+#include <linux/grsecurity.h>
55836+#include <linux/grinternal.h>
55837+#include <linux/gracl.h>
55838+
55839+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55840+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55841+
55842+EXPORT_SYMBOL(gr_search_udp_recvmsg);
55843+EXPORT_SYMBOL(gr_search_udp_sendmsg);
55844+
55845+#ifdef CONFIG_UNIX_MODULE
55846+EXPORT_SYMBOL(gr_acl_handle_unix);
55847+EXPORT_SYMBOL(gr_acl_handle_mknod);
55848+EXPORT_SYMBOL(gr_handle_chroot_unix);
55849+EXPORT_SYMBOL(gr_handle_create);
55850+#endif
55851+
55852+#ifdef CONFIG_GRKERNSEC
55853+#define gr_conn_table_size 32749
55854+struct conn_table_entry {
55855+ struct conn_table_entry *next;
55856+ struct signal_struct *sig;
55857+};
55858+
55859+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55860+DEFINE_SPINLOCK(gr_conn_table_lock);
55861+
55862+extern const char * gr_socktype_to_name(unsigned char type);
55863+extern const char * gr_proto_to_name(unsigned char proto);
55864+extern const char * gr_sockfamily_to_name(unsigned char family);
55865+
55866+static __inline__ int
55867+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55868+{
55869+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55870+}
55871+
55872+static __inline__ int
55873+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55874+ __u16 sport, __u16 dport)
55875+{
55876+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55877+ sig->gr_sport == sport && sig->gr_dport == dport))
55878+ return 1;
55879+ else
55880+ return 0;
55881+}
55882+
55883+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55884+{
55885+ struct conn_table_entry **match;
55886+ unsigned int index;
55887+
55888+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55889+ sig->gr_sport, sig->gr_dport,
55890+ gr_conn_table_size);
55891+
55892+ newent->sig = sig;
55893+
55894+ match = &gr_conn_table[index];
55895+ newent->next = *match;
55896+ *match = newent;
55897+
55898+ return;
55899+}
55900+
55901+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55902+{
55903+ struct conn_table_entry *match, *last = NULL;
55904+ unsigned int index;
55905+
55906+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55907+ sig->gr_sport, sig->gr_dport,
55908+ gr_conn_table_size);
55909+
55910+ match = gr_conn_table[index];
55911+ while (match && !conn_match(match->sig,
55912+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55913+ sig->gr_dport)) {
55914+ last = match;
55915+ match = match->next;
55916+ }
55917+
55918+ if (match) {
55919+ if (last)
55920+ last->next = match->next;
55921+ else
55922+ gr_conn_table[index] = NULL;
55923+ kfree(match);
55924+ }
55925+
55926+ return;
55927+}
55928+
55929+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55930+ __u16 sport, __u16 dport)
55931+{
55932+ struct conn_table_entry *match;
55933+ unsigned int index;
55934+
55935+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55936+
55937+ match = gr_conn_table[index];
55938+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55939+ match = match->next;
55940+
55941+ if (match)
55942+ return match->sig;
55943+ else
55944+ return NULL;
55945+}
55946+
55947+#endif
55948+
55949+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55950+{
55951+#ifdef CONFIG_GRKERNSEC
55952+ struct signal_struct *sig = task->signal;
55953+ struct conn_table_entry *newent;
55954+
55955+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55956+ if (newent == NULL)
55957+ return;
55958+ /* no bh lock needed since we are called with bh disabled */
55959+ spin_lock(&gr_conn_table_lock);
55960+ gr_del_task_from_ip_table_nolock(sig);
55961+ sig->gr_saddr = inet->inet_rcv_saddr;
55962+ sig->gr_daddr = inet->inet_daddr;
55963+ sig->gr_sport = inet->inet_sport;
55964+ sig->gr_dport = inet->inet_dport;
55965+ gr_add_to_task_ip_table_nolock(sig, newent);
55966+ spin_unlock(&gr_conn_table_lock);
55967+#endif
55968+ return;
55969+}
55970+
55971+void gr_del_task_from_ip_table(struct task_struct *task)
55972+{
55973+#ifdef CONFIG_GRKERNSEC
55974+ spin_lock_bh(&gr_conn_table_lock);
55975+ gr_del_task_from_ip_table_nolock(task->signal);
55976+ spin_unlock_bh(&gr_conn_table_lock);
55977+#endif
55978+ return;
55979+}
55980+
55981+void
55982+gr_attach_curr_ip(const struct sock *sk)
55983+{
55984+#ifdef CONFIG_GRKERNSEC
55985+ struct signal_struct *p, *set;
55986+ const struct inet_sock *inet = inet_sk(sk);
55987+
55988+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55989+ return;
55990+
55991+ set = current->signal;
55992+
55993+ spin_lock_bh(&gr_conn_table_lock);
55994+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
55995+ inet->inet_dport, inet->inet_sport);
55996+ if (unlikely(p != NULL)) {
55997+ set->curr_ip = p->curr_ip;
55998+ set->used_accept = 1;
55999+ gr_del_task_from_ip_table_nolock(p);
56000+ spin_unlock_bh(&gr_conn_table_lock);
56001+ return;
56002+ }
56003+ spin_unlock_bh(&gr_conn_table_lock);
56004+
56005+ set->curr_ip = inet->inet_daddr;
56006+ set->used_accept = 1;
56007+#endif
56008+ return;
56009+}
56010+
56011+int
56012+gr_handle_sock_all(const int family, const int type, const int protocol)
56013+{
56014+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56015+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56016+ (family != AF_UNIX)) {
56017+ if (family == AF_INET)
56018+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56019+ else
56020+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56021+ return -EACCES;
56022+ }
56023+#endif
56024+ return 0;
56025+}
56026+
56027+int
56028+gr_handle_sock_server(const struct sockaddr *sck)
56029+{
56030+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56031+ if (grsec_enable_socket_server &&
56032+ in_group_p(grsec_socket_server_gid) &&
56033+ sck && (sck->sa_family != AF_UNIX) &&
56034+ (sck->sa_family != AF_LOCAL)) {
56035+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56036+ return -EACCES;
56037+ }
56038+#endif
56039+ return 0;
56040+}
56041+
56042+int
56043+gr_handle_sock_server_other(const struct sock *sck)
56044+{
56045+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56046+ if (grsec_enable_socket_server &&
56047+ in_group_p(grsec_socket_server_gid) &&
56048+ sck && (sck->sk_family != AF_UNIX) &&
56049+ (sck->sk_family != AF_LOCAL)) {
56050+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56051+ return -EACCES;
56052+ }
56053+#endif
56054+ return 0;
56055+}
56056+
56057+int
56058+gr_handle_sock_client(const struct sockaddr *sck)
56059+{
56060+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56061+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56062+ sck && (sck->sa_family != AF_UNIX) &&
56063+ (sck->sa_family != AF_LOCAL)) {
56064+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56065+ return -EACCES;
56066+ }
56067+#endif
56068+ return 0;
56069+}
56070diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56071new file mode 100644
56072index 0000000..a1aedd7
56073--- /dev/null
56074+++ b/grsecurity/grsec_sysctl.c
56075@@ -0,0 +1,451 @@
56076+#include <linux/kernel.h>
56077+#include <linux/sched.h>
56078+#include <linux/sysctl.h>
56079+#include <linux/grsecurity.h>
56080+#include <linux/grinternal.h>
56081+
56082+int
56083+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56084+{
56085+#ifdef CONFIG_GRKERNSEC_SYSCTL
56086+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56087+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56088+ return -EACCES;
56089+ }
56090+#endif
56091+ return 0;
56092+}
56093+
56094+#ifdef CONFIG_GRKERNSEC_ROFS
56095+static int __maybe_unused one = 1;
56096+#endif
56097+
56098+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56099+struct ctl_table grsecurity_table[] = {
56100+#ifdef CONFIG_GRKERNSEC_SYSCTL
56101+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56102+#ifdef CONFIG_GRKERNSEC_IO
56103+ {
56104+ .procname = "disable_priv_io",
56105+ .data = &grsec_disable_privio,
56106+ .maxlen = sizeof(int),
56107+ .mode = 0600,
56108+ .proc_handler = &proc_dointvec,
56109+ },
56110+#endif
56111+#endif
56112+#ifdef CONFIG_GRKERNSEC_LINK
56113+ {
56114+ .procname = "linking_restrictions",
56115+ .data = &grsec_enable_link,
56116+ .maxlen = sizeof(int),
56117+ .mode = 0600,
56118+ .proc_handler = &proc_dointvec,
56119+ },
56120+#endif
56121+#ifdef CONFIG_GRKERNSEC_BRUTE
56122+ {
56123+ .procname = "deter_bruteforce",
56124+ .data = &grsec_enable_brute,
56125+ .maxlen = sizeof(int),
56126+ .mode = 0600,
56127+ .proc_handler = &proc_dointvec,
56128+ },
56129+#endif
56130+#ifdef CONFIG_GRKERNSEC_FIFO
56131+ {
56132+ .procname = "fifo_restrictions",
56133+ .data = &grsec_enable_fifo,
56134+ .maxlen = sizeof(int),
56135+ .mode = 0600,
56136+ .proc_handler = &proc_dointvec,
56137+ },
56138+#endif
56139+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56140+ {
56141+ .procname = "ptrace_readexec",
56142+ .data = &grsec_enable_ptrace_readexec,
56143+ .maxlen = sizeof(int),
56144+ .mode = 0600,
56145+ .proc_handler = &proc_dointvec,
56146+ },
56147+#endif
56148+#ifdef CONFIG_GRKERNSEC_SETXID
56149+ {
56150+ .procname = "consistent_setxid",
56151+ .data = &grsec_enable_setxid,
56152+ .maxlen = sizeof(int),
56153+ .mode = 0600,
56154+ .proc_handler = &proc_dointvec,
56155+ },
56156+#endif
56157+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56158+ {
56159+ .procname = "ip_blackhole",
56160+ .data = &grsec_enable_blackhole,
56161+ .maxlen = sizeof(int),
56162+ .mode = 0600,
56163+ .proc_handler = &proc_dointvec,
56164+ },
56165+ {
56166+ .procname = "lastack_retries",
56167+ .data = &grsec_lastack_retries,
56168+ .maxlen = sizeof(int),
56169+ .mode = 0600,
56170+ .proc_handler = &proc_dointvec,
56171+ },
56172+#endif
56173+#ifdef CONFIG_GRKERNSEC_EXECLOG
56174+ {
56175+ .procname = "exec_logging",
56176+ .data = &grsec_enable_execlog,
56177+ .maxlen = sizeof(int),
56178+ .mode = 0600,
56179+ .proc_handler = &proc_dointvec,
56180+ },
56181+#endif
56182+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56183+ {
56184+ .procname = "rwxmap_logging",
56185+ .data = &grsec_enable_log_rwxmaps,
56186+ .maxlen = sizeof(int),
56187+ .mode = 0600,
56188+ .proc_handler = &proc_dointvec,
56189+ },
56190+#endif
56191+#ifdef CONFIG_GRKERNSEC_SIGNAL
56192+ {
56193+ .procname = "signal_logging",
56194+ .data = &grsec_enable_signal,
56195+ .maxlen = sizeof(int),
56196+ .mode = 0600,
56197+ .proc_handler = &proc_dointvec,
56198+ },
56199+#endif
56200+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56201+ {
56202+ .procname = "forkfail_logging",
56203+ .data = &grsec_enable_forkfail,
56204+ .maxlen = sizeof(int),
56205+ .mode = 0600,
56206+ .proc_handler = &proc_dointvec,
56207+ },
56208+#endif
56209+#ifdef CONFIG_GRKERNSEC_TIME
56210+ {
56211+ .procname = "timechange_logging",
56212+ .data = &grsec_enable_time,
56213+ .maxlen = sizeof(int),
56214+ .mode = 0600,
56215+ .proc_handler = &proc_dointvec,
56216+ },
56217+#endif
56218+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56219+ {
56220+ .procname = "chroot_deny_shmat",
56221+ .data = &grsec_enable_chroot_shmat,
56222+ .maxlen = sizeof(int),
56223+ .mode = 0600,
56224+ .proc_handler = &proc_dointvec,
56225+ },
56226+#endif
56227+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56228+ {
56229+ .procname = "chroot_deny_unix",
56230+ .data = &grsec_enable_chroot_unix,
56231+ .maxlen = sizeof(int),
56232+ .mode = 0600,
56233+ .proc_handler = &proc_dointvec,
56234+ },
56235+#endif
56236+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56237+ {
56238+ .procname = "chroot_deny_mount",
56239+ .data = &grsec_enable_chroot_mount,
56240+ .maxlen = sizeof(int),
56241+ .mode = 0600,
56242+ .proc_handler = &proc_dointvec,
56243+ },
56244+#endif
56245+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56246+ {
56247+ .procname = "chroot_deny_fchdir",
56248+ .data = &grsec_enable_chroot_fchdir,
56249+ .maxlen = sizeof(int),
56250+ .mode = 0600,
56251+ .proc_handler = &proc_dointvec,
56252+ },
56253+#endif
56254+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56255+ {
56256+ .procname = "chroot_deny_chroot",
56257+ .data = &grsec_enable_chroot_double,
56258+ .maxlen = sizeof(int),
56259+ .mode = 0600,
56260+ .proc_handler = &proc_dointvec,
56261+ },
56262+#endif
56263+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56264+ {
56265+ .procname = "chroot_deny_pivot",
56266+ .data = &grsec_enable_chroot_pivot,
56267+ .maxlen = sizeof(int),
56268+ .mode = 0600,
56269+ .proc_handler = &proc_dointvec,
56270+ },
56271+#endif
56272+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56273+ {
56274+ .procname = "chroot_enforce_chdir",
56275+ .data = &grsec_enable_chroot_chdir,
56276+ .maxlen = sizeof(int),
56277+ .mode = 0600,
56278+ .proc_handler = &proc_dointvec,
56279+ },
56280+#endif
56281+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56282+ {
56283+ .procname = "chroot_deny_chmod",
56284+ .data = &grsec_enable_chroot_chmod,
56285+ .maxlen = sizeof(int),
56286+ .mode = 0600,
56287+ .proc_handler = &proc_dointvec,
56288+ },
56289+#endif
56290+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56291+ {
56292+ .procname = "chroot_deny_mknod",
56293+ .data = &grsec_enable_chroot_mknod,
56294+ .maxlen = sizeof(int),
56295+ .mode = 0600,
56296+ .proc_handler = &proc_dointvec,
56297+ },
56298+#endif
56299+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56300+ {
56301+ .procname = "chroot_restrict_nice",
56302+ .data = &grsec_enable_chroot_nice,
56303+ .maxlen = sizeof(int),
56304+ .mode = 0600,
56305+ .proc_handler = &proc_dointvec,
56306+ },
56307+#endif
56308+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56309+ {
56310+ .procname = "chroot_execlog",
56311+ .data = &grsec_enable_chroot_execlog,
56312+ .maxlen = sizeof(int),
56313+ .mode = 0600,
56314+ .proc_handler = &proc_dointvec,
56315+ },
56316+#endif
56317+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56318+ {
56319+ .procname = "chroot_caps",
56320+ .data = &grsec_enable_chroot_caps,
56321+ .maxlen = sizeof(int),
56322+ .mode = 0600,
56323+ .proc_handler = &proc_dointvec,
56324+ },
56325+#endif
56326+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56327+ {
56328+ .procname = "chroot_deny_sysctl",
56329+ .data = &grsec_enable_chroot_sysctl,
56330+ .maxlen = sizeof(int),
56331+ .mode = 0600,
56332+ .proc_handler = &proc_dointvec,
56333+ },
56334+#endif
56335+#ifdef CONFIG_GRKERNSEC_TPE
56336+ {
56337+ .procname = "tpe",
56338+ .data = &grsec_enable_tpe,
56339+ .maxlen = sizeof(int),
56340+ .mode = 0600,
56341+ .proc_handler = &proc_dointvec,
56342+ },
56343+ {
56344+ .procname = "tpe_gid",
56345+ .data = &grsec_tpe_gid,
56346+ .maxlen = sizeof(int),
56347+ .mode = 0600,
56348+ .proc_handler = &proc_dointvec,
56349+ },
56350+#endif
56351+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56352+ {
56353+ .procname = "tpe_invert",
56354+ .data = &grsec_enable_tpe_invert,
56355+ .maxlen = sizeof(int),
56356+ .mode = 0600,
56357+ .proc_handler = &proc_dointvec,
56358+ },
56359+#endif
56360+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56361+ {
56362+ .procname = "tpe_restrict_all",
56363+ .data = &grsec_enable_tpe_all,
56364+ .maxlen = sizeof(int),
56365+ .mode = 0600,
56366+ .proc_handler = &proc_dointvec,
56367+ },
56368+#endif
56369+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56370+ {
56371+ .procname = "socket_all",
56372+ .data = &grsec_enable_socket_all,
56373+ .maxlen = sizeof(int),
56374+ .mode = 0600,
56375+ .proc_handler = &proc_dointvec,
56376+ },
56377+ {
56378+ .procname = "socket_all_gid",
56379+ .data = &grsec_socket_all_gid,
56380+ .maxlen = sizeof(int),
56381+ .mode = 0600,
56382+ .proc_handler = &proc_dointvec,
56383+ },
56384+#endif
56385+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56386+ {
56387+ .procname = "socket_client",
56388+ .data = &grsec_enable_socket_client,
56389+ .maxlen = sizeof(int),
56390+ .mode = 0600,
56391+ .proc_handler = &proc_dointvec,
56392+ },
56393+ {
56394+ .procname = "socket_client_gid",
56395+ .data = &grsec_socket_client_gid,
56396+ .maxlen = sizeof(int),
56397+ .mode = 0600,
56398+ .proc_handler = &proc_dointvec,
56399+ },
56400+#endif
56401+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56402+ {
56403+ .procname = "socket_server",
56404+ .data = &grsec_enable_socket_server,
56405+ .maxlen = sizeof(int),
56406+ .mode = 0600,
56407+ .proc_handler = &proc_dointvec,
56408+ },
56409+ {
56410+ .procname = "socket_server_gid",
56411+ .data = &grsec_socket_server_gid,
56412+ .maxlen = sizeof(int),
56413+ .mode = 0600,
56414+ .proc_handler = &proc_dointvec,
56415+ },
56416+#endif
56417+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56418+ {
56419+ .procname = "audit_group",
56420+ .data = &grsec_enable_group,
56421+ .maxlen = sizeof(int),
56422+ .mode = 0600,
56423+ .proc_handler = &proc_dointvec,
56424+ },
56425+ {
56426+ .procname = "audit_gid",
56427+ .data = &grsec_audit_gid,
56428+ .maxlen = sizeof(int),
56429+ .mode = 0600,
56430+ .proc_handler = &proc_dointvec,
56431+ },
56432+#endif
56433+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56434+ {
56435+ .procname = "audit_chdir",
56436+ .data = &grsec_enable_chdir,
56437+ .maxlen = sizeof(int),
56438+ .mode = 0600,
56439+ .proc_handler = &proc_dointvec,
56440+ },
56441+#endif
56442+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56443+ {
56444+ .procname = "audit_mount",
56445+ .data = &grsec_enable_mount,
56446+ .maxlen = sizeof(int),
56447+ .mode = 0600,
56448+ .proc_handler = &proc_dointvec,
56449+ },
56450+#endif
56451+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56452+ {
56453+ .procname = "audit_textrel",
56454+ .data = &grsec_enable_audit_textrel,
56455+ .maxlen = sizeof(int),
56456+ .mode = 0600,
56457+ .proc_handler = &proc_dointvec,
56458+ },
56459+#endif
56460+#ifdef CONFIG_GRKERNSEC_DMESG
56461+ {
56462+ .procname = "dmesg",
56463+ .data = &grsec_enable_dmesg,
56464+ .maxlen = sizeof(int),
56465+ .mode = 0600,
56466+ .proc_handler = &proc_dointvec,
56467+ },
56468+#endif
56469+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56470+ {
56471+ .procname = "chroot_findtask",
56472+ .data = &grsec_enable_chroot_findtask,
56473+ .maxlen = sizeof(int),
56474+ .mode = 0600,
56475+ .proc_handler = &proc_dointvec,
56476+ },
56477+#endif
56478+#ifdef CONFIG_GRKERNSEC_RESLOG
56479+ {
56480+ .procname = "resource_logging",
56481+ .data = &grsec_resource_logging,
56482+ .maxlen = sizeof(int),
56483+ .mode = 0600,
56484+ .proc_handler = &proc_dointvec,
56485+ },
56486+#endif
56487+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56488+ {
56489+ .procname = "audit_ptrace",
56490+ .data = &grsec_enable_audit_ptrace,
56491+ .maxlen = sizeof(int),
56492+ .mode = 0600,
56493+ .proc_handler = &proc_dointvec,
56494+ },
56495+#endif
56496+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56497+ {
56498+ .procname = "harden_ptrace",
56499+ .data = &grsec_enable_harden_ptrace,
56500+ .maxlen = sizeof(int),
56501+ .mode = 0600,
56502+ .proc_handler = &proc_dointvec,
56503+ },
56504+#endif
56505+ {
56506+ .procname = "grsec_lock",
56507+ .data = &grsec_lock,
56508+ .maxlen = sizeof(int),
56509+ .mode = 0600,
56510+ .proc_handler = &proc_dointvec,
56511+ },
56512+#endif
56513+#ifdef CONFIG_GRKERNSEC_ROFS
56514+ {
56515+ .procname = "romount_protect",
56516+ .data = &grsec_enable_rofs,
56517+ .maxlen = sizeof(int),
56518+ .mode = 0600,
56519+ .proc_handler = &proc_dointvec_minmax,
56520+ .extra1 = &one,
56521+ .extra2 = &one,
56522+ },
56523+#endif
56524+ { }
56525+};
56526+#endif
56527diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56528new file mode 100644
56529index 0000000..0dc13c3
56530--- /dev/null
56531+++ b/grsecurity/grsec_time.c
56532@@ -0,0 +1,16 @@
56533+#include <linux/kernel.h>
56534+#include <linux/sched.h>
56535+#include <linux/grinternal.h>
56536+#include <linux/module.h>
56537+
56538+void
56539+gr_log_timechange(void)
56540+{
56541+#ifdef CONFIG_GRKERNSEC_TIME
56542+ if (grsec_enable_time)
56543+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56544+#endif
56545+ return;
56546+}
56547+
56548+EXPORT_SYMBOL(gr_log_timechange);
56549diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56550new file mode 100644
56551index 0000000..4a78774
56552--- /dev/null
56553+++ b/grsecurity/grsec_tpe.c
56554@@ -0,0 +1,39 @@
56555+#include <linux/kernel.h>
56556+#include <linux/sched.h>
56557+#include <linux/file.h>
56558+#include <linux/fs.h>
56559+#include <linux/grinternal.h>
56560+
56561+extern int gr_acl_tpe_check(void);
56562+
56563+int
56564+gr_tpe_allow(const struct file *file)
56565+{
56566+#ifdef CONFIG_GRKERNSEC
56567+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56568+ const struct cred *cred = current_cred();
56569+
56570+ if (cred->uid && ((grsec_enable_tpe &&
56571+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56572+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
56573+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
56574+#else
56575+ in_group_p(grsec_tpe_gid)
56576+#endif
56577+ ) || gr_acl_tpe_check()) &&
56578+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
56579+ (inode->i_mode & S_IWOTH))))) {
56580+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56581+ return 0;
56582+ }
56583+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56584+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
56585+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
56586+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
56587+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56588+ return 0;
56589+ }
56590+#endif
56591+#endif
56592+ return 1;
56593+}
56594diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
56595new file mode 100644
56596index 0000000..9f7b1ac
56597--- /dev/null
56598+++ b/grsecurity/grsum.c
56599@@ -0,0 +1,61 @@
56600+#include <linux/err.h>
56601+#include <linux/kernel.h>
56602+#include <linux/sched.h>
56603+#include <linux/mm.h>
56604+#include <linux/scatterlist.h>
56605+#include <linux/crypto.h>
56606+#include <linux/gracl.h>
56607+
56608+
56609+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
56610+#error "crypto and sha256 must be built into the kernel"
56611+#endif
56612+
56613+int
56614+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56615+{
56616+ char *p;
56617+ struct crypto_hash *tfm;
56618+ struct hash_desc desc;
56619+ struct scatterlist sg;
56620+ unsigned char temp_sum[GR_SHA_LEN];
56621+ volatile int retval = 0;
56622+ volatile int dummy = 0;
56623+ unsigned int i;
56624+
56625+ sg_init_table(&sg, 1);
56626+
56627+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56628+ if (IS_ERR(tfm)) {
56629+ /* should never happen, since sha256 should be built in */
56630+ return 1;
56631+ }
56632+
56633+ desc.tfm = tfm;
56634+ desc.flags = 0;
56635+
56636+ crypto_hash_init(&desc);
56637+
56638+ p = salt;
56639+ sg_set_buf(&sg, p, GR_SALT_LEN);
56640+ crypto_hash_update(&desc, &sg, sg.length);
56641+
56642+ p = entry->pw;
56643+ sg_set_buf(&sg, p, strlen(p));
56644+
56645+ crypto_hash_update(&desc, &sg, sg.length);
56646+
56647+ crypto_hash_final(&desc, temp_sum);
56648+
56649+ memset(entry->pw, 0, GR_PW_LEN);
56650+
56651+ for (i = 0; i < GR_SHA_LEN; i++)
56652+ if (sum[i] != temp_sum[i])
56653+ retval = 1;
56654+ else
56655+ dummy = 1; // waste a cycle
56656+
56657+ crypto_free_hash(tfm);
56658+
56659+ return retval;
56660+}
56661diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
56662index 6cd5b64..f620d2d 100644
56663--- a/include/acpi/acpi_bus.h
56664+++ b/include/acpi/acpi_bus.h
56665@@ -107,7 +107,7 @@ struct acpi_device_ops {
56666 acpi_op_bind bind;
56667 acpi_op_unbind unbind;
56668 acpi_op_notify notify;
56669-};
56670+} __no_const;
56671
56672 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56673
56674diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
56675index b7babf0..71e4e74 100644
56676--- a/include/asm-generic/atomic-long.h
56677+++ b/include/asm-generic/atomic-long.h
56678@@ -22,6 +22,12 @@
56679
56680 typedef atomic64_t atomic_long_t;
56681
56682+#ifdef CONFIG_PAX_REFCOUNT
56683+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56684+#else
56685+typedef atomic64_t atomic_long_unchecked_t;
56686+#endif
56687+
56688 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56689
56690 static inline long atomic_long_read(atomic_long_t *l)
56691@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
56692 return (long)atomic64_read(v);
56693 }
56694
56695+#ifdef CONFIG_PAX_REFCOUNT
56696+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56697+{
56698+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56699+
56700+ return (long)atomic64_read_unchecked(v);
56701+}
56702+#endif
56703+
56704 static inline void atomic_long_set(atomic_long_t *l, long i)
56705 {
56706 atomic64_t *v = (atomic64_t *)l;
56707@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
56708 atomic64_set(v, i);
56709 }
56710
56711+#ifdef CONFIG_PAX_REFCOUNT
56712+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56713+{
56714+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56715+
56716+ atomic64_set_unchecked(v, i);
56717+}
56718+#endif
56719+
56720 static inline void atomic_long_inc(atomic_long_t *l)
56721 {
56722 atomic64_t *v = (atomic64_t *)l;
56723@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
56724 atomic64_inc(v);
56725 }
56726
56727+#ifdef CONFIG_PAX_REFCOUNT
56728+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56729+{
56730+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56731+
56732+ atomic64_inc_unchecked(v);
56733+}
56734+#endif
56735+
56736 static inline void atomic_long_dec(atomic_long_t *l)
56737 {
56738 atomic64_t *v = (atomic64_t *)l;
56739@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
56740 atomic64_dec(v);
56741 }
56742
56743+#ifdef CONFIG_PAX_REFCOUNT
56744+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56745+{
56746+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56747+
56748+ atomic64_dec_unchecked(v);
56749+}
56750+#endif
56751+
56752 static inline void atomic_long_add(long i, atomic_long_t *l)
56753 {
56754 atomic64_t *v = (atomic64_t *)l;
56755@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
56756 atomic64_add(i, v);
56757 }
56758
56759+#ifdef CONFIG_PAX_REFCOUNT
56760+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56761+{
56762+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56763+
56764+ atomic64_add_unchecked(i, v);
56765+}
56766+#endif
56767+
56768 static inline void atomic_long_sub(long i, atomic_long_t *l)
56769 {
56770 atomic64_t *v = (atomic64_t *)l;
56771@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
56772 atomic64_sub(i, v);
56773 }
56774
56775+#ifdef CONFIG_PAX_REFCOUNT
56776+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56777+{
56778+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56779+
56780+ atomic64_sub_unchecked(i, v);
56781+}
56782+#endif
56783+
56784 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56785 {
56786 atomic64_t *v = (atomic64_t *)l;
56787@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
56788 return (long)atomic64_inc_return(v);
56789 }
56790
56791+#ifdef CONFIG_PAX_REFCOUNT
56792+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56793+{
56794+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56795+
56796+ return (long)atomic64_inc_return_unchecked(v);
56797+}
56798+#endif
56799+
56800 static inline long atomic_long_dec_return(atomic_long_t *l)
56801 {
56802 atomic64_t *v = (atomic64_t *)l;
56803@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
56804
56805 typedef atomic_t atomic_long_t;
56806
56807+#ifdef CONFIG_PAX_REFCOUNT
56808+typedef atomic_unchecked_t atomic_long_unchecked_t;
56809+#else
56810+typedef atomic_t atomic_long_unchecked_t;
56811+#endif
56812+
56813 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56814 static inline long atomic_long_read(atomic_long_t *l)
56815 {
56816@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
56817 return (long)atomic_read(v);
56818 }
56819
56820+#ifdef CONFIG_PAX_REFCOUNT
56821+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56822+{
56823+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56824+
56825+ return (long)atomic_read_unchecked(v);
56826+}
56827+#endif
56828+
56829 static inline void atomic_long_set(atomic_long_t *l, long i)
56830 {
56831 atomic_t *v = (atomic_t *)l;
56832@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
56833 atomic_set(v, i);
56834 }
56835
56836+#ifdef CONFIG_PAX_REFCOUNT
56837+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56838+{
56839+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56840+
56841+ atomic_set_unchecked(v, i);
56842+}
56843+#endif
56844+
56845 static inline void atomic_long_inc(atomic_long_t *l)
56846 {
56847 atomic_t *v = (atomic_t *)l;
56848@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
56849 atomic_inc(v);
56850 }
56851
56852+#ifdef CONFIG_PAX_REFCOUNT
56853+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56854+{
56855+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56856+
56857+ atomic_inc_unchecked(v);
56858+}
56859+#endif
56860+
56861 static inline void atomic_long_dec(atomic_long_t *l)
56862 {
56863 atomic_t *v = (atomic_t *)l;
56864@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
56865 atomic_dec(v);
56866 }
56867
56868+#ifdef CONFIG_PAX_REFCOUNT
56869+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56870+{
56871+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56872+
56873+ atomic_dec_unchecked(v);
56874+}
56875+#endif
56876+
56877 static inline void atomic_long_add(long i, atomic_long_t *l)
56878 {
56879 atomic_t *v = (atomic_t *)l;
56880@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
56881 atomic_add(i, v);
56882 }
56883
56884+#ifdef CONFIG_PAX_REFCOUNT
56885+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56886+{
56887+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56888+
56889+ atomic_add_unchecked(i, v);
56890+}
56891+#endif
56892+
56893 static inline void atomic_long_sub(long i, atomic_long_t *l)
56894 {
56895 atomic_t *v = (atomic_t *)l;
56896@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
56897 atomic_sub(i, v);
56898 }
56899
56900+#ifdef CONFIG_PAX_REFCOUNT
56901+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56902+{
56903+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56904+
56905+ atomic_sub_unchecked(i, v);
56906+}
56907+#endif
56908+
56909 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56910 {
56911 atomic_t *v = (atomic_t *)l;
56912@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
56913 return (long)atomic_inc_return(v);
56914 }
56915
56916+#ifdef CONFIG_PAX_REFCOUNT
56917+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56918+{
56919+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56920+
56921+ return (long)atomic_inc_return_unchecked(v);
56922+}
56923+#endif
56924+
56925 static inline long atomic_long_dec_return(atomic_long_t *l)
56926 {
56927 atomic_t *v = (atomic_t *)l;
56928@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
56929
56930 #endif /* BITS_PER_LONG == 64 */
56931
56932+#ifdef CONFIG_PAX_REFCOUNT
56933+static inline void pax_refcount_needs_these_functions(void)
56934+{
56935+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56936+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56937+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56938+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56939+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56940+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56941+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56942+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56943+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56944+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56945+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56946+
56947+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56948+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56949+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56950+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
56951+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56952+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56953+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56954+}
56955+#else
56956+#define atomic_read_unchecked(v) atomic_read(v)
56957+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56958+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56959+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56960+#define atomic_inc_unchecked(v) atomic_inc(v)
56961+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56962+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56963+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56964+#define atomic_dec_unchecked(v) atomic_dec(v)
56965+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56966+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56967+
56968+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56969+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56970+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56971+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
56972+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56973+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56974+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56975+#endif
56976+
56977 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
56978diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
56979index b18ce4f..2ee2843 100644
56980--- a/include/asm-generic/atomic64.h
56981+++ b/include/asm-generic/atomic64.h
56982@@ -16,6 +16,8 @@ typedef struct {
56983 long long counter;
56984 } atomic64_t;
56985
56986+typedef atomic64_t atomic64_unchecked_t;
56987+
56988 #define ATOMIC64_INIT(i) { (i) }
56989
56990 extern long long atomic64_read(const atomic64_t *v);
56991@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
56992 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
56993 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
56994
56995+#define atomic64_read_unchecked(v) atomic64_read(v)
56996+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
56997+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
56998+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
56999+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57000+#define atomic64_inc_unchecked(v) atomic64_inc(v)
57001+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57002+#define atomic64_dec_unchecked(v) atomic64_dec(v)
57003+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57004+
57005 #endif /* _ASM_GENERIC_ATOMIC64_H */
57006diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57007index 1bfcfe5..e04c5c9 100644
57008--- a/include/asm-generic/cache.h
57009+++ b/include/asm-generic/cache.h
57010@@ -6,7 +6,7 @@
57011 * cache lines need to provide their own cache.h.
57012 */
57013
57014-#define L1_CACHE_SHIFT 5
57015-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57016+#define L1_CACHE_SHIFT 5UL
57017+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57018
57019 #endif /* __ASM_GENERIC_CACHE_H */
57020diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57021index 1ca3efc..e3dc852 100644
57022--- a/include/asm-generic/int-l64.h
57023+++ b/include/asm-generic/int-l64.h
57024@@ -46,6 +46,8 @@ typedef unsigned int u32;
57025 typedef signed long s64;
57026 typedef unsigned long u64;
57027
57028+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57029+
57030 #define S8_C(x) x
57031 #define U8_C(x) x ## U
57032 #define S16_C(x) x
57033diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57034index f394147..b6152b9 100644
57035--- a/include/asm-generic/int-ll64.h
57036+++ b/include/asm-generic/int-ll64.h
57037@@ -51,6 +51,8 @@ typedef unsigned int u32;
57038 typedef signed long long s64;
57039 typedef unsigned long long u64;
57040
57041+typedef unsigned long long intoverflow_t;
57042+
57043 #define S8_C(x) x
57044 #define U8_C(x) x ## U
57045 #define S16_C(x) x
57046diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57047index 0232ccb..13d9165 100644
57048--- a/include/asm-generic/kmap_types.h
57049+++ b/include/asm-generic/kmap_types.h
57050@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57051 KMAP_D(17) KM_NMI,
57052 KMAP_D(18) KM_NMI_PTE,
57053 KMAP_D(19) KM_KDB,
57054+KMAP_D(20) KM_CLEARPAGE,
57055 /*
57056 * Remember to update debug_kmap_atomic() when adding new kmap types!
57057 */
57058-KMAP_D(20) KM_TYPE_NR
57059+KMAP_D(21) KM_TYPE_NR
57060 };
57061
57062 #undef KMAP_D
57063diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57064index 725612b..9cc513a 100644
57065--- a/include/asm-generic/pgtable-nopmd.h
57066+++ b/include/asm-generic/pgtable-nopmd.h
57067@@ -1,14 +1,19 @@
57068 #ifndef _PGTABLE_NOPMD_H
57069 #define _PGTABLE_NOPMD_H
57070
57071-#ifndef __ASSEMBLY__
57072-
57073 #include <asm-generic/pgtable-nopud.h>
57074
57075-struct mm_struct;
57076-
57077 #define __PAGETABLE_PMD_FOLDED
57078
57079+#define PMD_SHIFT PUD_SHIFT
57080+#define PTRS_PER_PMD 1
57081+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57082+#define PMD_MASK (~(PMD_SIZE-1))
57083+
57084+#ifndef __ASSEMBLY__
57085+
57086+struct mm_struct;
57087+
57088 /*
57089 * Having the pmd type consist of a pud gets the size right, and allows
57090 * us to conceptually access the pud entry that this pmd is folded into
57091@@ -16,11 +21,6 @@ struct mm_struct;
57092 */
57093 typedef struct { pud_t pud; } pmd_t;
57094
57095-#define PMD_SHIFT PUD_SHIFT
57096-#define PTRS_PER_PMD 1
57097-#define PMD_SIZE (1UL << PMD_SHIFT)
57098-#define PMD_MASK (~(PMD_SIZE-1))
57099-
57100 /*
57101 * The "pud_xxx()" functions here are trivial for a folded two-level
57102 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57103diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57104index 810431d..ccc3638 100644
57105--- a/include/asm-generic/pgtable-nopud.h
57106+++ b/include/asm-generic/pgtable-nopud.h
57107@@ -1,10 +1,15 @@
57108 #ifndef _PGTABLE_NOPUD_H
57109 #define _PGTABLE_NOPUD_H
57110
57111-#ifndef __ASSEMBLY__
57112-
57113 #define __PAGETABLE_PUD_FOLDED
57114
57115+#define PUD_SHIFT PGDIR_SHIFT
57116+#define PTRS_PER_PUD 1
57117+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57118+#define PUD_MASK (~(PUD_SIZE-1))
57119+
57120+#ifndef __ASSEMBLY__
57121+
57122 /*
57123 * Having the pud type consist of a pgd gets the size right, and allows
57124 * us to conceptually access the pgd entry that this pud is folded into
57125@@ -12,11 +17,6 @@
57126 */
57127 typedef struct { pgd_t pgd; } pud_t;
57128
57129-#define PUD_SHIFT PGDIR_SHIFT
57130-#define PTRS_PER_PUD 1
57131-#define PUD_SIZE (1UL << PUD_SHIFT)
57132-#define PUD_MASK (~(PUD_SIZE-1))
57133-
57134 /*
57135 * The "pgd_xxx()" functions here are trivial for a folded two-level
57136 * setup: the pud is never bad, and a pud always exists (as it's folded
57137diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57138index 76bff2b..c7a14e2 100644
57139--- a/include/asm-generic/pgtable.h
57140+++ b/include/asm-generic/pgtable.h
57141@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57142 #endif /* __HAVE_ARCH_PMD_WRITE */
57143 #endif
57144
57145+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57146+static inline unsigned long pax_open_kernel(void) { return 0; }
57147+#endif
57148+
57149+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57150+static inline unsigned long pax_close_kernel(void) { return 0; }
57151+#endif
57152+
57153 #endif /* !__ASSEMBLY__ */
57154
57155 #endif /* _ASM_GENERIC_PGTABLE_H */
57156diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57157index b5e2e4c..6a5373e 100644
57158--- a/include/asm-generic/vmlinux.lds.h
57159+++ b/include/asm-generic/vmlinux.lds.h
57160@@ -217,6 +217,7 @@
57161 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57162 VMLINUX_SYMBOL(__start_rodata) = .; \
57163 *(.rodata) *(.rodata.*) \
57164+ *(.data..read_only) \
57165 *(__vermagic) /* Kernel version magic */ \
57166 . = ALIGN(8); \
57167 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57168@@ -722,17 +723,18 @@
57169 * section in the linker script will go there too. @phdr should have
57170 * a leading colon.
57171 *
57172- * Note that this macros defines __per_cpu_load as an absolute symbol.
57173+ * Note that this macros defines per_cpu_load as an absolute symbol.
57174 * If there is no need to put the percpu section at a predetermined
57175 * address, use PERCPU_SECTION.
57176 */
57177 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57178- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57179- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57180+ per_cpu_load = .; \
57181+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57182 - LOAD_OFFSET) { \
57183+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57184 PERCPU_INPUT(cacheline) \
57185 } phdr \
57186- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57187+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57188
57189 /**
57190 * PERCPU_SECTION - define output section for percpu area, simple version
57191diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57192index 1f9e951..14ef517 100644
57193--- a/include/drm/drmP.h
57194+++ b/include/drm/drmP.h
57195@@ -72,6 +72,7 @@
57196 #include <linux/workqueue.h>
57197 #include <linux/poll.h>
57198 #include <asm/pgalloc.h>
57199+#include <asm/local.h>
57200 #include "drm.h"
57201
57202 #include <linux/idr.h>
57203@@ -1038,7 +1039,7 @@ struct drm_device {
57204
57205 /** \name Usage Counters */
57206 /*@{ */
57207- int open_count; /**< Outstanding files open */
57208+ local_t open_count; /**< Outstanding files open */
57209 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57210 atomic_t vma_count; /**< Outstanding vma areas open */
57211 int buf_use; /**< Buffers in use -- cannot alloc */
57212@@ -1049,7 +1050,7 @@ struct drm_device {
57213 /*@{ */
57214 unsigned long counters;
57215 enum drm_stat_type types[15];
57216- atomic_t counts[15];
57217+ atomic_unchecked_t counts[15];
57218 /*@} */
57219
57220 struct list_head filelist;
57221diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57222index 73b0712..0b7ef2f 100644
57223--- a/include/drm/drm_crtc_helper.h
57224+++ b/include/drm/drm_crtc_helper.h
57225@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57226
57227 /* disable crtc when not in use - more explicit than dpms off */
57228 void (*disable)(struct drm_crtc *crtc);
57229-};
57230+} __no_const;
57231
57232 struct drm_encoder_helper_funcs {
57233 void (*dpms)(struct drm_encoder *encoder, int mode);
57234@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57235 struct drm_connector *connector);
57236 /* disable encoder when not in use - more explicit than dpms off */
57237 void (*disable)(struct drm_encoder *encoder);
57238-};
57239+} __no_const;
57240
57241 struct drm_connector_helper_funcs {
57242 int (*get_modes)(struct drm_connector *connector);
57243diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57244index 26c1f78..6722682 100644
57245--- a/include/drm/ttm/ttm_memory.h
57246+++ b/include/drm/ttm/ttm_memory.h
57247@@ -47,7 +47,7 @@
57248
57249 struct ttm_mem_shrink {
57250 int (*do_shrink) (struct ttm_mem_shrink *);
57251-};
57252+} __no_const;
57253
57254 /**
57255 * struct ttm_mem_global - Global memory accounting structure.
57256diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57257index e86dfca..40cc55f 100644
57258--- a/include/linux/a.out.h
57259+++ b/include/linux/a.out.h
57260@@ -39,6 +39,14 @@ enum machine_type {
57261 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57262 };
57263
57264+/* Constants for the N_FLAGS field */
57265+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57266+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57267+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57268+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57269+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57270+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57271+
57272 #if !defined (N_MAGIC)
57273 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57274 #endif
57275diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57276index 49a83ca..df96b54 100644
57277--- a/include/linux/atmdev.h
57278+++ b/include/linux/atmdev.h
57279@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57280 #endif
57281
57282 struct k_atm_aal_stats {
57283-#define __HANDLE_ITEM(i) atomic_t i
57284+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57285 __AAL_STAT_ITEMS
57286 #undef __HANDLE_ITEM
57287 };
57288diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57289index fd88a39..f4d0bad 100644
57290--- a/include/linux/binfmts.h
57291+++ b/include/linux/binfmts.h
57292@@ -88,6 +88,7 @@ struct linux_binfmt {
57293 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57294 int (*load_shlib)(struct file *);
57295 int (*core_dump)(struct coredump_params *cprm);
57296+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57297 unsigned long min_coredump; /* minimal dump size */
57298 };
57299
57300diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57301index 0ed1eb0..3ab569b 100644
57302--- a/include/linux/blkdev.h
57303+++ b/include/linux/blkdev.h
57304@@ -1315,7 +1315,7 @@ struct block_device_operations {
57305 /* this callback is with swap_lock and sometimes page table lock held */
57306 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57307 struct module *owner;
57308-};
57309+} __do_const;
57310
57311 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57312 unsigned long);
57313diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57314index 4d1a074..88f929a 100644
57315--- a/include/linux/blktrace_api.h
57316+++ b/include/linux/blktrace_api.h
57317@@ -162,7 +162,7 @@ struct blk_trace {
57318 struct dentry *dir;
57319 struct dentry *dropped_file;
57320 struct dentry *msg_file;
57321- atomic_t dropped;
57322+ atomic_unchecked_t dropped;
57323 };
57324
57325 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57326diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57327index 83195fb..0b0f77d 100644
57328--- a/include/linux/byteorder/little_endian.h
57329+++ b/include/linux/byteorder/little_endian.h
57330@@ -42,51 +42,51 @@
57331
57332 static inline __le64 __cpu_to_le64p(const __u64 *p)
57333 {
57334- return (__force __le64)*p;
57335+ return (__force const __le64)*p;
57336 }
57337 static inline __u64 __le64_to_cpup(const __le64 *p)
57338 {
57339- return (__force __u64)*p;
57340+ return (__force const __u64)*p;
57341 }
57342 static inline __le32 __cpu_to_le32p(const __u32 *p)
57343 {
57344- return (__force __le32)*p;
57345+ return (__force const __le32)*p;
57346 }
57347 static inline __u32 __le32_to_cpup(const __le32 *p)
57348 {
57349- return (__force __u32)*p;
57350+ return (__force const __u32)*p;
57351 }
57352 static inline __le16 __cpu_to_le16p(const __u16 *p)
57353 {
57354- return (__force __le16)*p;
57355+ return (__force const __le16)*p;
57356 }
57357 static inline __u16 __le16_to_cpup(const __le16 *p)
57358 {
57359- return (__force __u16)*p;
57360+ return (__force const __u16)*p;
57361 }
57362 static inline __be64 __cpu_to_be64p(const __u64 *p)
57363 {
57364- return (__force __be64)__swab64p(p);
57365+ return (__force const __be64)__swab64p(p);
57366 }
57367 static inline __u64 __be64_to_cpup(const __be64 *p)
57368 {
57369- return __swab64p((__u64 *)p);
57370+ return __swab64p((const __u64 *)p);
57371 }
57372 static inline __be32 __cpu_to_be32p(const __u32 *p)
57373 {
57374- return (__force __be32)__swab32p(p);
57375+ return (__force const __be32)__swab32p(p);
57376 }
57377 static inline __u32 __be32_to_cpup(const __be32 *p)
57378 {
57379- return __swab32p((__u32 *)p);
57380+ return __swab32p((const __u32 *)p);
57381 }
57382 static inline __be16 __cpu_to_be16p(const __u16 *p)
57383 {
57384- return (__force __be16)__swab16p(p);
57385+ return (__force const __be16)__swab16p(p);
57386 }
57387 static inline __u16 __be16_to_cpup(const __be16 *p)
57388 {
57389- return __swab16p((__u16 *)p);
57390+ return __swab16p((const __u16 *)p);
57391 }
57392 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57393 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57394diff --git a/include/linux/cache.h b/include/linux/cache.h
57395index 4c57065..4307975 100644
57396--- a/include/linux/cache.h
57397+++ b/include/linux/cache.h
57398@@ -16,6 +16,10 @@
57399 #define __read_mostly
57400 #endif
57401
57402+#ifndef __read_only
57403+#define __read_only __read_mostly
57404+#endif
57405+
57406 #ifndef ____cacheline_aligned
57407 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57408 #endif
57409diff --git a/include/linux/capability.h b/include/linux/capability.h
57410index a63d13d..069bfd5 100644
57411--- a/include/linux/capability.h
57412+++ b/include/linux/capability.h
57413@@ -548,6 +548,9 @@ extern bool capable(int cap);
57414 extern bool ns_capable(struct user_namespace *ns, int cap);
57415 extern bool task_ns_capable(struct task_struct *t, int cap);
57416 extern bool nsown_capable(int cap);
57417+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57418+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57419+extern bool capable_nolog(int cap);
57420
57421 /* audit system wants to get cap info from files as well */
57422 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57423diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57424index 04ffb2e..6799180 100644
57425--- a/include/linux/cleancache.h
57426+++ b/include/linux/cleancache.h
57427@@ -31,7 +31,7 @@ struct cleancache_ops {
57428 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57429 void (*flush_inode)(int, struct cleancache_filekey);
57430 void (*flush_fs)(int);
57431-};
57432+} __no_const;
57433
57434 extern struct cleancache_ops
57435 cleancache_register_ops(struct cleancache_ops *ops);
57436diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57437index dfadc96..c0e70c1 100644
57438--- a/include/linux/compiler-gcc4.h
57439+++ b/include/linux/compiler-gcc4.h
57440@@ -31,6 +31,12 @@
57441
57442
57443 #if __GNUC_MINOR__ >= 5
57444+
57445+#ifdef CONSTIFY_PLUGIN
57446+#define __no_const __attribute__((no_const))
57447+#define __do_const __attribute__((do_const))
57448+#endif
57449+
57450 /*
57451 * Mark a position in code as unreachable. This can be used to
57452 * suppress control flow warnings after asm blocks that transfer
57453@@ -46,6 +52,11 @@
57454 #define __noclone __attribute__((__noclone__))
57455
57456 #endif
57457+
57458+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57459+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57460+#define __bos0(ptr) __bos((ptr), 0)
57461+#define __bos1(ptr) __bos((ptr), 1)
57462 #endif
57463
57464 #if __GNUC_MINOR__ > 0
57465diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57466index 320d6c9..8573a1c 100644
57467--- a/include/linux/compiler.h
57468+++ b/include/linux/compiler.h
57469@@ -5,31 +5,62 @@
57470
57471 #ifdef __CHECKER__
57472 # define __user __attribute__((noderef, address_space(1)))
57473+# define __force_user __force __user
57474 # define __kernel __attribute__((address_space(0)))
57475+# define __force_kernel __force __kernel
57476 # define __safe __attribute__((safe))
57477 # define __force __attribute__((force))
57478 # define __nocast __attribute__((nocast))
57479 # define __iomem __attribute__((noderef, address_space(2)))
57480+# define __force_iomem __force __iomem
57481 # define __acquires(x) __attribute__((context(x,0,1)))
57482 # define __releases(x) __attribute__((context(x,1,0)))
57483 # define __acquire(x) __context__(x,1)
57484 # define __release(x) __context__(x,-1)
57485 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57486 # define __percpu __attribute__((noderef, address_space(3)))
57487+# define __force_percpu __force __percpu
57488 #ifdef CONFIG_SPARSE_RCU_POINTER
57489 # define __rcu __attribute__((noderef, address_space(4)))
57490+# define __force_rcu __force __rcu
57491 #else
57492 # define __rcu
57493+# define __force_rcu
57494 #endif
57495 extern void __chk_user_ptr(const volatile void __user *);
57496 extern void __chk_io_ptr(const volatile void __iomem *);
57497+#elif defined(CHECKER_PLUGIN)
57498+//# define __user
57499+//# define __force_user
57500+//# define __kernel
57501+//# define __force_kernel
57502+# define __safe
57503+# define __force
57504+# define __nocast
57505+# define __iomem
57506+# define __force_iomem
57507+# define __chk_user_ptr(x) (void)0
57508+# define __chk_io_ptr(x) (void)0
57509+# define __builtin_warning(x, y...) (1)
57510+# define __acquires(x)
57511+# define __releases(x)
57512+# define __acquire(x) (void)0
57513+# define __release(x) (void)0
57514+# define __cond_lock(x,c) (c)
57515+# define __percpu
57516+# define __force_percpu
57517+# define __rcu
57518+# define __force_rcu
57519 #else
57520 # define __user
57521+# define __force_user
57522 # define __kernel
57523+# define __force_kernel
57524 # define __safe
57525 # define __force
57526 # define __nocast
57527 # define __iomem
57528+# define __force_iomem
57529 # define __chk_user_ptr(x) (void)0
57530 # define __chk_io_ptr(x) (void)0
57531 # define __builtin_warning(x, y...) (1)
57532@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57533 # define __release(x) (void)0
57534 # define __cond_lock(x,c) (c)
57535 # define __percpu
57536+# define __force_percpu
57537 # define __rcu
57538+# define __force_rcu
57539 #endif
57540
57541 #ifdef __KERNEL__
57542@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57543 # define __attribute_const__ /* unimplemented */
57544 #endif
57545
57546+#ifndef __no_const
57547+# define __no_const
57548+#endif
57549+
57550+#ifndef __do_const
57551+# define __do_const
57552+#endif
57553+
57554 /*
57555 * Tell gcc if a function is cold. The compiler will assume any path
57556 * directly leading to the call is unlikely.
57557@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57558 #define __cold
57559 #endif
57560
57561+#ifndef __alloc_size
57562+#define __alloc_size(...)
57563+#endif
57564+
57565+#ifndef __bos
57566+#define __bos(ptr, arg)
57567+#endif
57568+
57569+#ifndef __bos0
57570+#define __bos0(ptr)
57571+#endif
57572+
57573+#ifndef __bos1
57574+#define __bos1(ptr)
57575+#endif
57576+
57577 /* Simple shorthand for a section definition */
57578 #ifndef __section
57579 # define __section(S) __attribute__ ((__section__(#S)))
57580@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57581 * use is to mediate communication between process-level code and irq/NMI
57582 * handlers, all running on the same CPU.
57583 */
57584-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57585+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57586+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57587
57588 #endif /* __LINUX_COMPILER_H */
57589diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
57590index e9eaec5..bfeb9bb 100644
57591--- a/include/linux/cpuset.h
57592+++ b/include/linux/cpuset.h
57593@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
57594 * nodemask.
57595 */
57596 smp_mb();
57597- --ACCESS_ONCE(current->mems_allowed_change_disable);
57598+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57599 }
57600
57601 static inline void set_mems_allowed(nodemask_t nodemask)
57602diff --git a/include/linux/cred.h b/include/linux/cred.h
57603index 4030896..8d6f342 100644
57604--- a/include/linux/cred.h
57605+++ b/include/linux/cred.h
57606@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
57607 static inline void validate_process_creds(void)
57608 {
57609 }
57610+static inline void validate_task_creds(struct task_struct *task)
57611+{
57612+}
57613 #endif
57614
57615 /**
57616diff --git a/include/linux/crypto.h b/include/linux/crypto.h
57617index 8a94217..15d49e3 100644
57618--- a/include/linux/crypto.h
57619+++ b/include/linux/crypto.h
57620@@ -365,7 +365,7 @@ struct cipher_tfm {
57621 const u8 *key, unsigned int keylen);
57622 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57623 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57624-};
57625+} __no_const;
57626
57627 struct hash_tfm {
57628 int (*init)(struct hash_desc *desc);
57629@@ -386,13 +386,13 @@ struct compress_tfm {
57630 int (*cot_decompress)(struct crypto_tfm *tfm,
57631 const u8 *src, unsigned int slen,
57632 u8 *dst, unsigned int *dlen);
57633-};
57634+} __no_const;
57635
57636 struct rng_tfm {
57637 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57638 unsigned int dlen);
57639 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57640-};
57641+} __no_const;
57642
57643 #define crt_ablkcipher crt_u.ablkcipher
57644 #define crt_aead crt_u.aead
57645diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
57646index 7925bf0..d5143d2 100644
57647--- a/include/linux/decompress/mm.h
57648+++ b/include/linux/decompress/mm.h
57649@@ -77,7 +77,7 @@ static void free(void *where)
57650 * warnings when not needed (indeed large_malloc / large_free are not
57651 * needed by inflate */
57652
57653-#define malloc(a) kmalloc(a, GFP_KERNEL)
57654+#define malloc(a) kmalloc((a), GFP_KERNEL)
57655 #define free(a) kfree(a)
57656
57657 #define large_malloc(a) vmalloc(a)
57658diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
57659index e13117c..e9fc938 100644
57660--- a/include/linux/dma-mapping.h
57661+++ b/include/linux/dma-mapping.h
57662@@ -46,7 +46,7 @@ struct dma_map_ops {
57663 u64 (*get_required_mask)(struct device *dev);
57664 #endif
57665 int is_phys;
57666-};
57667+} __do_const;
57668
57669 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57670
57671diff --git a/include/linux/efi.h b/include/linux/efi.h
57672index 2362a0b..cfaf8fcc 100644
57673--- a/include/linux/efi.h
57674+++ b/include/linux/efi.h
57675@@ -446,7 +446,7 @@ struct efivar_operations {
57676 efi_get_variable_t *get_variable;
57677 efi_get_next_variable_t *get_next_variable;
57678 efi_set_variable_t *set_variable;
57679-};
57680+} __no_const;
57681
57682 struct efivars {
57683 /*
57684diff --git a/include/linux/elf.h b/include/linux/elf.h
57685index 31f0508..5421c01 100644
57686--- a/include/linux/elf.h
57687+++ b/include/linux/elf.h
57688@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57689 #define PT_GNU_EH_FRAME 0x6474e550
57690
57691 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57692+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57693+
57694+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57695+
57696+/* Constants for the e_flags field */
57697+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57698+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57699+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57700+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57701+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57702+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57703
57704 /*
57705 * Extended Numbering
57706@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57707 #define DT_DEBUG 21
57708 #define DT_TEXTREL 22
57709 #define DT_JMPREL 23
57710+#define DT_FLAGS 30
57711+ #define DF_TEXTREL 0x00000004
57712 #define DT_ENCODING 32
57713 #define OLD_DT_LOOS 0x60000000
57714 #define DT_LOOS 0x6000000d
57715@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57716 #define PF_W 0x2
57717 #define PF_X 0x1
57718
57719+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57720+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57721+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57722+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57723+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57724+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57725+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57726+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57727+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57728+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57729+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57730+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57731+
57732 typedef struct elf32_phdr{
57733 Elf32_Word p_type;
57734 Elf32_Off p_offset;
57735@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57736 #define EI_OSABI 7
57737 #define EI_PAD 8
57738
57739+#define EI_PAX 14
57740+
57741 #define ELFMAG0 0x7f /* EI_MAG */
57742 #define ELFMAG1 'E'
57743 #define ELFMAG2 'L'
57744@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
57745 #define elf_note elf32_note
57746 #define elf_addr_t Elf32_Off
57747 #define Elf_Half Elf32_Half
57748+#define elf_dyn Elf32_Dyn
57749
57750 #else
57751
57752@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
57753 #define elf_note elf64_note
57754 #define elf_addr_t Elf64_Off
57755 #define Elf_Half Elf64_Half
57756+#define elf_dyn Elf64_Dyn
57757
57758 #endif
57759
57760diff --git a/include/linux/filter.h b/include/linux/filter.h
57761index 8eeb205..d59bfa2 100644
57762--- a/include/linux/filter.h
57763+++ b/include/linux/filter.h
57764@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
57765
57766 struct sk_buff;
57767 struct sock;
57768+struct bpf_jit_work;
57769
57770 struct sk_filter
57771 {
57772@@ -141,6 +142,9 @@ struct sk_filter
57773 unsigned int len; /* Number of filter blocks */
57774 unsigned int (*bpf_func)(const struct sk_buff *skb,
57775 const struct sock_filter *filter);
57776+#ifdef CONFIG_BPF_JIT
57777+ struct bpf_jit_work *work;
57778+#endif
57779 struct rcu_head rcu;
57780 struct sock_filter insns[0];
57781 };
57782diff --git a/include/linux/firewire.h b/include/linux/firewire.h
57783index 84ccf8e..2e9b14c 100644
57784--- a/include/linux/firewire.h
57785+++ b/include/linux/firewire.h
57786@@ -428,7 +428,7 @@ struct fw_iso_context {
57787 union {
57788 fw_iso_callback_t sc;
57789 fw_iso_mc_callback_t mc;
57790- } callback;
57791+ } __no_const callback;
57792 void *callback_data;
57793 };
57794
57795diff --git a/include/linux/fs.h b/include/linux/fs.h
57796index e0bc4ff..d79c2fa 100644
57797--- a/include/linux/fs.h
57798+++ b/include/linux/fs.h
57799@@ -1608,7 +1608,8 @@ struct file_operations {
57800 int (*setlease)(struct file *, long, struct file_lock **);
57801 long (*fallocate)(struct file *file, int mode, loff_t offset,
57802 loff_t len);
57803-};
57804+} __do_const;
57805+typedef struct file_operations __no_const file_operations_no_const;
57806
57807 struct inode_operations {
57808 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
57809diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
57810index 003dc0f..3c4ea97 100644
57811--- a/include/linux/fs_struct.h
57812+++ b/include/linux/fs_struct.h
57813@@ -6,7 +6,7 @@
57814 #include <linux/seqlock.h>
57815
57816 struct fs_struct {
57817- int users;
57818+ atomic_t users;
57819 spinlock_t lock;
57820 seqcount_t seq;
57821 int umask;
57822diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
57823index ce31408..b1ad003 100644
57824--- a/include/linux/fscache-cache.h
57825+++ b/include/linux/fscache-cache.h
57826@@ -102,7 +102,7 @@ struct fscache_operation {
57827 fscache_operation_release_t release;
57828 };
57829
57830-extern atomic_t fscache_op_debug_id;
57831+extern atomic_unchecked_t fscache_op_debug_id;
57832 extern void fscache_op_work_func(struct work_struct *work);
57833
57834 extern void fscache_enqueue_operation(struct fscache_operation *);
57835@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
57836 {
57837 INIT_WORK(&op->work, fscache_op_work_func);
57838 atomic_set(&op->usage, 1);
57839- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57840+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57841 op->processor = processor;
57842 op->release = release;
57843 INIT_LIST_HEAD(&op->pend_link);
57844diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
57845index 2a53f10..0187fdf 100644
57846--- a/include/linux/fsnotify.h
57847+++ b/include/linux/fsnotify.h
57848@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
57849 */
57850 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
57851 {
57852- return kstrdup(name, GFP_KERNEL);
57853+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
57854 }
57855
57856 /*
57857diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
57858index 91d0e0a3..035666b 100644
57859--- a/include/linux/fsnotify_backend.h
57860+++ b/include/linux/fsnotify_backend.h
57861@@ -105,6 +105,7 @@ struct fsnotify_ops {
57862 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
57863 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
57864 };
57865+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
57866
57867 /*
57868 * A group is a "thing" that wants to receive notification about filesystem
57869diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
57870index c3da42d..c70e0df 100644
57871--- a/include/linux/ftrace_event.h
57872+++ b/include/linux/ftrace_event.h
57873@@ -97,7 +97,7 @@ struct trace_event_functions {
57874 trace_print_func raw;
57875 trace_print_func hex;
57876 trace_print_func binary;
57877-};
57878+} __no_const;
57879
57880 struct trace_event {
57881 struct hlist_node node;
57882@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
57883 extern int trace_add_event_call(struct ftrace_event_call *call);
57884 extern void trace_remove_event_call(struct ftrace_event_call *call);
57885
57886-#define is_signed_type(type) (((type)(-1)) < 0)
57887+#define is_signed_type(type) (((type)(-1)) < (type)1)
57888
57889 int trace_set_clr_event(const char *system, const char *event, int set);
57890
57891diff --git a/include/linux/genhd.h b/include/linux/genhd.h
57892index 6d18f35..ab71e2c 100644
57893--- a/include/linux/genhd.h
57894+++ b/include/linux/genhd.h
57895@@ -185,7 +185,7 @@ struct gendisk {
57896 struct kobject *slave_dir;
57897
57898 struct timer_rand_state *random;
57899- atomic_t sync_io; /* RAID */
57900+ atomic_unchecked_t sync_io; /* RAID */
57901 struct disk_events *ev;
57902 #ifdef CONFIG_BLK_DEV_INTEGRITY
57903 struct blk_integrity *integrity;
57904diff --git a/include/linux/gracl.h b/include/linux/gracl.h
57905new file mode 100644
57906index 0000000..0dc3943
57907--- /dev/null
57908+++ b/include/linux/gracl.h
57909@@ -0,0 +1,317 @@
57910+#ifndef GR_ACL_H
57911+#define GR_ACL_H
57912+
57913+#include <linux/grdefs.h>
57914+#include <linux/resource.h>
57915+#include <linux/capability.h>
57916+#include <linux/dcache.h>
57917+#include <asm/resource.h>
57918+
57919+/* Major status information */
57920+
57921+#define GR_VERSION "grsecurity 2.2.2"
57922+#define GRSECURITY_VERSION 0x2202
57923+
57924+enum {
57925+ GR_SHUTDOWN = 0,
57926+ GR_ENABLE = 1,
57927+ GR_SPROLE = 2,
57928+ GR_RELOAD = 3,
57929+ GR_SEGVMOD = 4,
57930+ GR_STATUS = 5,
57931+ GR_UNSPROLE = 6,
57932+ GR_PASSSET = 7,
57933+ GR_SPROLEPAM = 8,
57934+};
57935+
57936+/* Password setup definitions
57937+ * kernel/grhash.c */
57938+enum {
57939+ GR_PW_LEN = 128,
57940+ GR_SALT_LEN = 16,
57941+ GR_SHA_LEN = 32,
57942+};
57943+
57944+enum {
57945+ GR_SPROLE_LEN = 64,
57946+};
57947+
57948+enum {
57949+ GR_NO_GLOB = 0,
57950+ GR_REG_GLOB,
57951+ GR_CREATE_GLOB
57952+};
57953+
57954+#define GR_NLIMITS 32
57955+
57956+/* Begin Data Structures */
57957+
57958+struct sprole_pw {
57959+ unsigned char *rolename;
57960+ unsigned char salt[GR_SALT_LEN];
57961+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57962+};
57963+
57964+struct name_entry {
57965+ __u32 key;
57966+ ino_t inode;
57967+ dev_t device;
57968+ char *name;
57969+ __u16 len;
57970+ __u8 deleted;
57971+ struct name_entry *prev;
57972+ struct name_entry *next;
57973+};
57974+
57975+struct inodev_entry {
57976+ struct name_entry *nentry;
57977+ struct inodev_entry *prev;
57978+ struct inodev_entry *next;
57979+};
57980+
57981+struct acl_role_db {
57982+ struct acl_role_label **r_hash;
57983+ __u32 r_size;
57984+};
57985+
57986+struct inodev_db {
57987+ struct inodev_entry **i_hash;
57988+ __u32 i_size;
57989+};
57990+
57991+struct name_db {
57992+ struct name_entry **n_hash;
57993+ __u32 n_size;
57994+};
57995+
57996+struct crash_uid {
57997+ uid_t uid;
57998+ unsigned long expires;
57999+};
58000+
58001+struct gr_hash_struct {
58002+ void **table;
58003+ void **nametable;
58004+ void *first;
58005+ __u32 table_size;
58006+ __u32 used_size;
58007+ int type;
58008+};
58009+
58010+/* Userspace Grsecurity ACL data structures */
58011+
58012+struct acl_subject_label {
58013+ char *filename;
58014+ ino_t inode;
58015+ dev_t device;
58016+ __u32 mode;
58017+ kernel_cap_t cap_mask;
58018+ kernel_cap_t cap_lower;
58019+ kernel_cap_t cap_invert_audit;
58020+
58021+ struct rlimit res[GR_NLIMITS];
58022+ __u32 resmask;
58023+
58024+ __u8 user_trans_type;
58025+ __u8 group_trans_type;
58026+ uid_t *user_transitions;
58027+ gid_t *group_transitions;
58028+ __u16 user_trans_num;
58029+ __u16 group_trans_num;
58030+
58031+ __u32 sock_families[2];
58032+ __u32 ip_proto[8];
58033+ __u32 ip_type;
58034+ struct acl_ip_label **ips;
58035+ __u32 ip_num;
58036+ __u32 inaddr_any_override;
58037+
58038+ __u32 crashes;
58039+ unsigned long expires;
58040+
58041+ struct acl_subject_label *parent_subject;
58042+ struct gr_hash_struct *hash;
58043+ struct acl_subject_label *prev;
58044+ struct acl_subject_label *next;
58045+
58046+ struct acl_object_label **obj_hash;
58047+ __u32 obj_hash_size;
58048+ __u16 pax_flags;
58049+};
58050+
58051+struct role_allowed_ip {
58052+ __u32 addr;
58053+ __u32 netmask;
58054+
58055+ struct role_allowed_ip *prev;
58056+ struct role_allowed_ip *next;
58057+};
58058+
58059+struct role_transition {
58060+ char *rolename;
58061+
58062+ struct role_transition *prev;
58063+ struct role_transition *next;
58064+};
58065+
58066+struct acl_role_label {
58067+ char *rolename;
58068+ uid_t uidgid;
58069+ __u16 roletype;
58070+
58071+ __u16 auth_attempts;
58072+ unsigned long expires;
58073+
58074+ struct acl_subject_label *root_label;
58075+ struct gr_hash_struct *hash;
58076+
58077+ struct acl_role_label *prev;
58078+ struct acl_role_label *next;
58079+
58080+ struct role_transition *transitions;
58081+ struct role_allowed_ip *allowed_ips;
58082+ uid_t *domain_children;
58083+ __u16 domain_child_num;
58084+
58085+ struct acl_subject_label **subj_hash;
58086+ __u32 subj_hash_size;
58087+};
58088+
58089+struct user_acl_role_db {
58090+ struct acl_role_label **r_table;
58091+ __u32 num_pointers; /* Number of allocations to track */
58092+ __u32 num_roles; /* Number of roles */
58093+ __u32 num_domain_children; /* Number of domain children */
58094+ __u32 num_subjects; /* Number of subjects */
58095+ __u32 num_objects; /* Number of objects */
58096+};
58097+
58098+struct acl_object_label {
58099+ char *filename;
58100+ ino_t inode;
58101+ dev_t device;
58102+ __u32 mode;
58103+
58104+ struct acl_subject_label *nested;
58105+ struct acl_object_label *globbed;
58106+
58107+ /* next two structures not used */
58108+
58109+ struct acl_object_label *prev;
58110+ struct acl_object_label *next;
58111+};
58112+
58113+struct acl_ip_label {
58114+ char *iface;
58115+ __u32 addr;
58116+ __u32 netmask;
58117+ __u16 low, high;
58118+ __u8 mode;
58119+ __u32 type;
58120+ __u32 proto[8];
58121+
58122+ /* next two structures not used */
58123+
58124+ struct acl_ip_label *prev;
58125+ struct acl_ip_label *next;
58126+};
58127+
58128+struct gr_arg {
58129+ struct user_acl_role_db role_db;
58130+ unsigned char pw[GR_PW_LEN];
58131+ unsigned char salt[GR_SALT_LEN];
58132+ unsigned char sum[GR_SHA_LEN];
58133+ unsigned char sp_role[GR_SPROLE_LEN];
58134+ struct sprole_pw *sprole_pws;
58135+ dev_t segv_device;
58136+ ino_t segv_inode;
58137+ uid_t segv_uid;
58138+ __u16 num_sprole_pws;
58139+ __u16 mode;
58140+};
58141+
58142+struct gr_arg_wrapper {
58143+ struct gr_arg *arg;
58144+ __u32 version;
58145+ __u32 size;
58146+};
58147+
58148+struct subject_map {
58149+ struct acl_subject_label *user;
58150+ struct acl_subject_label *kernel;
58151+ struct subject_map *prev;
58152+ struct subject_map *next;
58153+};
58154+
58155+struct acl_subj_map_db {
58156+ struct subject_map **s_hash;
58157+ __u32 s_size;
58158+};
58159+
58160+/* End Data Structures Section */
58161+
58162+/* Hash functions generated by empirical testing by Brad Spengler
58163+ Makes good use of the low bits of the inode. Generally 0-1 times
58164+ in loop for successful match. 0-3 for unsuccessful match.
58165+ Shift/add algorithm with modulus of table size and an XOR*/
58166+
58167+static __inline__ unsigned int
58168+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58169+{
58170+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58171+}
58172+
58173+ static __inline__ unsigned int
58174+shash(const struct acl_subject_label *userp, const unsigned int sz)
58175+{
58176+ return ((const unsigned long)userp % sz);
58177+}
58178+
58179+static __inline__ unsigned int
58180+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58181+{
58182+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58183+}
58184+
58185+static __inline__ unsigned int
58186+nhash(const char *name, const __u16 len, const unsigned int sz)
58187+{
58188+ return full_name_hash((const unsigned char *)name, len) % sz;
58189+}
58190+
58191+#define FOR_EACH_ROLE_START(role) \
58192+ role = role_list; \
58193+ while (role) {
58194+
58195+#define FOR_EACH_ROLE_END(role) \
58196+ role = role->prev; \
58197+ }
58198+
58199+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58200+ subj = NULL; \
58201+ iter = 0; \
58202+ while (iter < role->subj_hash_size) { \
58203+ if (subj == NULL) \
58204+ subj = role->subj_hash[iter]; \
58205+ if (subj == NULL) { \
58206+ iter++; \
58207+ continue; \
58208+ }
58209+
58210+#define FOR_EACH_SUBJECT_END(subj,iter) \
58211+ subj = subj->next; \
58212+ if (subj == NULL) \
58213+ iter++; \
58214+ }
58215+
58216+
58217+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58218+ subj = role->hash->first; \
58219+ while (subj != NULL) {
58220+
58221+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58222+ subj = subj->next; \
58223+ }
58224+
58225+#endif
58226+
58227diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58228new file mode 100644
58229index 0000000..323ecf2
58230--- /dev/null
58231+++ b/include/linux/gralloc.h
58232@@ -0,0 +1,9 @@
58233+#ifndef __GRALLOC_H
58234+#define __GRALLOC_H
58235+
58236+void acl_free_all(void);
58237+int acl_alloc_stack_init(unsigned long size);
58238+void *acl_alloc(unsigned long len);
58239+void *acl_alloc_num(unsigned long num, unsigned long len);
58240+
58241+#endif
58242diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58243new file mode 100644
58244index 0000000..b30e9bc
58245--- /dev/null
58246+++ b/include/linux/grdefs.h
58247@@ -0,0 +1,140 @@
58248+#ifndef GRDEFS_H
58249+#define GRDEFS_H
58250+
58251+/* Begin grsecurity status declarations */
58252+
58253+enum {
58254+ GR_READY = 0x01,
58255+ GR_STATUS_INIT = 0x00 // disabled state
58256+};
58257+
58258+/* Begin ACL declarations */
58259+
58260+/* Role flags */
58261+
58262+enum {
58263+ GR_ROLE_USER = 0x0001,
58264+ GR_ROLE_GROUP = 0x0002,
58265+ GR_ROLE_DEFAULT = 0x0004,
58266+ GR_ROLE_SPECIAL = 0x0008,
58267+ GR_ROLE_AUTH = 0x0010,
58268+ GR_ROLE_NOPW = 0x0020,
58269+ GR_ROLE_GOD = 0x0040,
58270+ GR_ROLE_LEARN = 0x0080,
58271+ GR_ROLE_TPE = 0x0100,
58272+ GR_ROLE_DOMAIN = 0x0200,
58273+ GR_ROLE_PAM = 0x0400,
58274+ GR_ROLE_PERSIST = 0x0800
58275+};
58276+
58277+/* ACL Subject and Object mode flags */
58278+enum {
58279+ GR_DELETED = 0x80000000
58280+};
58281+
58282+/* ACL Object-only mode flags */
58283+enum {
58284+ GR_READ = 0x00000001,
58285+ GR_APPEND = 0x00000002,
58286+ GR_WRITE = 0x00000004,
58287+ GR_EXEC = 0x00000008,
58288+ GR_FIND = 0x00000010,
58289+ GR_INHERIT = 0x00000020,
58290+ GR_SETID = 0x00000040,
58291+ GR_CREATE = 0x00000080,
58292+ GR_DELETE = 0x00000100,
58293+ GR_LINK = 0x00000200,
58294+ GR_AUDIT_READ = 0x00000400,
58295+ GR_AUDIT_APPEND = 0x00000800,
58296+ GR_AUDIT_WRITE = 0x00001000,
58297+ GR_AUDIT_EXEC = 0x00002000,
58298+ GR_AUDIT_FIND = 0x00004000,
58299+ GR_AUDIT_INHERIT= 0x00008000,
58300+ GR_AUDIT_SETID = 0x00010000,
58301+ GR_AUDIT_CREATE = 0x00020000,
58302+ GR_AUDIT_DELETE = 0x00040000,
58303+ GR_AUDIT_LINK = 0x00080000,
58304+ GR_PTRACERD = 0x00100000,
58305+ GR_NOPTRACE = 0x00200000,
58306+ GR_SUPPRESS = 0x00400000,
58307+ GR_NOLEARN = 0x00800000,
58308+ GR_INIT_TRANSFER= 0x01000000
58309+};
58310+
58311+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58312+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58313+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58314+
58315+/* ACL subject-only mode flags */
58316+enum {
58317+ GR_KILL = 0x00000001,
58318+ GR_VIEW = 0x00000002,
58319+ GR_PROTECTED = 0x00000004,
58320+ GR_LEARN = 0x00000008,
58321+ GR_OVERRIDE = 0x00000010,
58322+ /* just a placeholder, this mode is only used in userspace */
58323+ GR_DUMMY = 0x00000020,
58324+ GR_PROTSHM = 0x00000040,
58325+ GR_KILLPROC = 0x00000080,
58326+ GR_KILLIPPROC = 0x00000100,
58327+ /* just a placeholder, this mode is only used in userspace */
58328+ GR_NOTROJAN = 0x00000200,
58329+ GR_PROTPROCFD = 0x00000400,
58330+ GR_PROCACCT = 0x00000800,
58331+ GR_RELAXPTRACE = 0x00001000,
58332+ GR_NESTED = 0x00002000,
58333+ GR_INHERITLEARN = 0x00004000,
58334+ GR_PROCFIND = 0x00008000,
58335+ GR_POVERRIDE = 0x00010000,
58336+ GR_KERNELAUTH = 0x00020000,
58337+ GR_ATSECURE = 0x00040000,
58338+ GR_SHMEXEC = 0x00080000
58339+};
58340+
58341+enum {
58342+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58343+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58344+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58345+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58346+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58347+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58348+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58349+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58350+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58351+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58352+};
58353+
58354+enum {
58355+ GR_ID_USER = 0x01,
58356+ GR_ID_GROUP = 0x02,
58357+};
58358+
58359+enum {
58360+ GR_ID_ALLOW = 0x01,
58361+ GR_ID_DENY = 0x02,
58362+};
58363+
58364+#define GR_CRASH_RES 31
58365+#define GR_UIDTABLE_MAX 500
58366+
58367+/* begin resource learning section */
58368+enum {
58369+ GR_RLIM_CPU_BUMP = 60,
58370+ GR_RLIM_FSIZE_BUMP = 50000,
58371+ GR_RLIM_DATA_BUMP = 10000,
58372+ GR_RLIM_STACK_BUMP = 1000,
58373+ GR_RLIM_CORE_BUMP = 10000,
58374+ GR_RLIM_RSS_BUMP = 500000,
58375+ GR_RLIM_NPROC_BUMP = 1,
58376+ GR_RLIM_NOFILE_BUMP = 5,
58377+ GR_RLIM_MEMLOCK_BUMP = 50000,
58378+ GR_RLIM_AS_BUMP = 500000,
58379+ GR_RLIM_LOCKS_BUMP = 2,
58380+ GR_RLIM_SIGPENDING_BUMP = 5,
58381+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58382+ GR_RLIM_NICE_BUMP = 1,
58383+ GR_RLIM_RTPRIO_BUMP = 1,
58384+ GR_RLIM_RTTIME_BUMP = 1000000
58385+};
58386+
58387+#endif
58388diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58389new file mode 100644
58390index 0000000..da390f1
58391--- /dev/null
58392+++ b/include/linux/grinternal.h
58393@@ -0,0 +1,221 @@
58394+#ifndef __GRINTERNAL_H
58395+#define __GRINTERNAL_H
58396+
58397+#ifdef CONFIG_GRKERNSEC
58398+
58399+#include <linux/fs.h>
58400+#include <linux/mnt_namespace.h>
58401+#include <linux/nsproxy.h>
58402+#include <linux/gracl.h>
58403+#include <linux/grdefs.h>
58404+#include <linux/grmsg.h>
58405+
58406+void gr_add_learn_entry(const char *fmt, ...)
58407+ __attribute__ ((format (printf, 1, 2)));
58408+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58409+ const struct vfsmount *mnt);
58410+__u32 gr_check_create(const struct dentry *new_dentry,
58411+ const struct dentry *parent,
58412+ const struct vfsmount *mnt, const __u32 mode);
58413+int gr_check_protected_task(const struct task_struct *task);
58414+__u32 to_gr_audit(const __u32 reqmode);
58415+int gr_set_acls(const int type);
58416+int gr_apply_subject_to_task(struct task_struct *task);
58417+int gr_acl_is_enabled(void);
58418+char gr_roletype_to_char(void);
58419+
58420+void gr_handle_alertkill(struct task_struct *task);
58421+char *gr_to_filename(const struct dentry *dentry,
58422+ const struct vfsmount *mnt);
58423+char *gr_to_filename1(const struct dentry *dentry,
58424+ const struct vfsmount *mnt);
58425+char *gr_to_filename2(const struct dentry *dentry,
58426+ const struct vfsmount *mnt);
58427+char *gr_to_filename3(const struct dentry *dentry,
58428+ const struct vfsmount *mnt);
58429+
58430+extern int grsec_enable_ptrace_readexec;
58431+extern int grsec_enable_harden_ptrace;
58432+extern int grsec_enable_link;
58433+extern int grsec_enable_fifo;
58434+extern int grsec_enable_execve;
58435+extern int grsec_enable_shm;
58436+extern int grsec_enable_execlog;
58437+extern int grsec_enable_signal;
58438+extern int grsec_enable_audit_ptrace;
58439+extern int grsec_enable_forkfail;
58440+extern int grsec_enable_time;
58441+extern int grsec_enable_rofs;
58442+extern int grsec_enable_chroot_shmat;
58443+extern int grsec_enable_chroot_mount;
58444+extern int grsec_enable_chroot_double;
58445+extern int grsec_enable_chroot_pivot;
58446+extern int grsec_enable_chroot_chdir;
58447+extern int grsec_enable_chroot_chmod;
58448+extern int grsec_enable_chroot_mknod;
58449+extern int grsec_enable_chroot_fchdir;
58450+extern int grsec_enable_chroot_nice;
58451+extern int grsec_enable_chroot_execlog;
58452+extern int grsec_enable_chroot_caps;
58453+extern int grsec_enable_chroot_sysctl;
58454+extern int grsec_enable_chroot_unix;
58455+extern int grsec_enable_tpe;
58456+extern int grsec_tpe_gid;
58457+extern int grsec_enable_tpe_all;
58458+extern int grsec_enable_tpe_invert;
58459+extern int grsec_enable_socket_all;
58460+extern int grsec_socket_all_gid;
58461+extern int grsec_enable_socket_client;
58462+extern int grsec_socket_client_gid;
58463+extern int grsec_enable_socket_server;
58464+extern int grsec_socket_server_gid;
58465+extern int grsec_audit_gid;
58466+extern int grsec_enable_group;
58467+extern int grsec_enable_audit_textrel;
58468+extern int grsec_enable_log_rwxmaps;
58469+extern int grsec_enable_mount;
58470+extern int grsec_enable_chdir;
58471+extern int grsec_resource_logging;
58472+extern int grsec_enable_blackhole;
58473+extern int grsec_lastack_retries;
58474+extern int grsec_enable_brute;
58475+extern int grsec_lock;
58476+
58477+extern spinlock_t grsec_alert_lock;
58478+extern unsigned long grsec_alert_wtime;
58479+extern unsigned long grsec_alert_fyet;
58480+
58481+extern spinlock_t grsec_audit_lock;
58482+
58483+extern rwlock_t grsec_exec_file_lock;
58484+
58485+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58486+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58487+ (tsk)->exec_file->f_vfsmnt) : "/")
58488+
58489+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58490+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58491+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58492+
58493+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58494+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58495+ (tsk)->exec_file->f_vfsmnt) : "/")
58496+
58497+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58498+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58499+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58500+
58501+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58502+
58503+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58504+
58505+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58506+ (task)->pid, (cred)->uid, \
58507+ (cred)->euid, (cred)->gid, (cred)->egid, \
58508+ gr_parent_task_fullpath(task), \
58509+ (task)->real_parent->comm, (task)->real_parent->pid, \
58510+ (pcred)->uid, (pcred)->euid, \
58511+ (pcred)->gid, (pcred)->egid
58512+
58513+#define GR_CHROOT_CAPS {{ \
58514+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58515+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58516+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58517+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58518+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58519+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58520+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58521+
58522+#define security_learn(normal_msg,args...) \
58523+({ \
58524+ read_lock(&grsec_exec_file_lock); \
58525+ gr_add_learn_entry(normal_msg "\n", ## args); \
58526+ read_unlock(&grsec_exec_file_lock); \
58527+})
58528+
58529+enum {
58530+ GR_DO_AUDIT,
58531+ GR_DONT_AUDIT,
58532+ /* used for non-audit messages that we shouldn't kill the task on */
58533+ GR_DONT_AUDIT_GOOD
58534+};
58535+
58536+enum {
58537+ GR_TTYSNIFF,
58538+ GR_RBAC,
58539+ GR_RBAC_STR,
58540+ GR_STR_RBAC,
58541+ GR_RBAC_MODE2,
58542+ GR_RBAC_MODE3,
58543+ GR_FILENAME,
58544+ GR_SYSCTL_HIDDEN,
58545+ GR_NOARGS,
58546+ GR_ONE_INT,
58547+ GR_ONE_INT_TWO_STR,
58548+ GR_ONE_STR,
58549+ GR_STR_INT,
58550+ GR_TWO_STR_INT,
58551+ GR_TWO_INT,
58552+ GR_TWO_U64,
58553+ GR_THREE_INT,
58554+ GR_FIVE_INT_TWO_STR,
58555+ GR_TWO_STR,
58556+ GR_THREE_STR,
58557+ GR_FOUR_STR,
58558+ GR_STR_FILENAME,
58559+ GR_FILENAME_STR,
58560+ GR_FILENAME_TWO_INT,
58561+ GR_FILENAME_TWO_INT_STR,
58562+ GR_TEXTREL,
58563+ GR_PTRACE,
58564+ GR_RESOURCE,
58565+ GR_CAP,
58566+ GR_SIG,
58567+ GR_SIG2,
58568+ GR_CRASH1,
58569+ GR_CRASH2,
58570+ GR_PSACCT,
58571+ GR_RWXMAP
58572+};
58573+
58574+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58575+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58576+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58577+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58578+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58579+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58580+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58581+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58582+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58583+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58584+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58585+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58586+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58587+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58588+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58589+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58590+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58591+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58592+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58593+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58594+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58595+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58596+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58597+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58598+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58599+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58600+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58601+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58602+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58603+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58604+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58605+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58606+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58607+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58608+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58609+
58610+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58611+
58612+#endif
58613+
58614+#endif
58615diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
58616new file mode 100644
58617index 0000000..cf49370
58618--- /dev/null
58619+++ b/include/linux/grmsg.h
58620@@ -0,0 +1,109 @@
58621+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58622+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58623+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58624+#define GR_STOPMOD_MSG "denied modification of module state by "
58625+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58626+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58627+#define GR_IOPERM_MSG "denied use of ioperm() by "
58628+#define GR_IOPL_MSG "denied use of iopl() by "
58629+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58630+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58631+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58632+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58633+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58634+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58635+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58636+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58637+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58638+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58639+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58640+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58641+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58642+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58643+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58644+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58645+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58646+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58647+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58648+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58649+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58650+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58651+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58652+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58653+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58654+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58655+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58656+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58657+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58658+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58659+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58660+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58661+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58662+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58663+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58664+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58665+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58666+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58667+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58668+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58669+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58670+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58671+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58672+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58673+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58674+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58675+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58676+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58677+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58678+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58679+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58680+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58681+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58682+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58683+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58684+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58685+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58686+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58687+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58688+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58689+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58690+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58691+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58692+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58693+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58694+#define GR_NICE_CHROOT_MSG "denied priority change by "
58695+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58696+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58697+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58698+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58699+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58700+#define GR_TIME_MSG "time set by "
58701+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58702+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58703+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58704+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58705+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58706+#define GR_BIND_MSG "denied bind() by "
58707+#define GR_CONNECT_MSG "denied connect() by "
58708+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58709+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58710+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58711+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58712+#define GR_CAP_ACL_MSG "use of %s denied for "
58713+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58714+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58715+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58716+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58717+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58718+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58719+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58720+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58721+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58722+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58723+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58724+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58725+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58726+#define GR_VM86_MSG "denied use of vm86 by "
58727+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58728+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable suid/sgid binary %.950s by "
58729+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58730diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
58731new file mode 100644
58732index 0000000..1ca3931
58733--- /dev/null
58734+++ b/include/linux/grsecurity.h
58735@@ -0,0 +1,233 @@
58736+#ifndef GR_SECURITY_H
58737+#define GR_SECURITY_H
58738+#include <linux/fs.h>
58739+#include <linux/fs_struct.h>
58740+#include <linux/binfmts.h>
58741+#include <linux/gracl.h>
58742+
58743+/* notify of brain-dead configs */
58744+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58745+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58746+#endif
58747+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58748+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58749+#endif
58750+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58751+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58752+#endif
58753+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58754+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58755+#endif
58756+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58757+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58758+#endif
58759+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58760+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58761+#endif
58762+
58763+#include <linux/compat.h>
58764+
58765+struct user_arg_ptr {
58766+#ifdef CONFIG_COMPAT
58767+ bool is_compat;
58768+#endif
58769+ union {
58770+ const char __user *const __user *native;
58771+#ifdef CONFIG_COMPAT
58772+ compat_uptr_t __user *compat;
58773+#endif
58774+ } ptr;
58775+};
58776+
58777+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58778+void gr_handle_brute_check(void);
58779+void gr_handle_kernel_exploit(void);
58780+int gr_process_user_ban(void);
58781+
58782+char gr_roletype_to_char(void);
58783+
58784+int gr_acl_enable_at_secure(void);
58785+
58786+int gr_check_user_change(int real, int effective, int fs);
58787+int gr_check_group_change(int real, int effective, int fs);
58788+
58789+void gr_del_task_from_ip_table(struct task_struct *p);
58790+
58791+int gr_pid_is_chrooted(struct task_struct *p);
58792+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58793+int gr_handle_chroot_nice(void);
58794+int gr_handle_chroot_sysctl(const int op);
58795+int gr_handle_chroot_setpriority(struct task_struct *p,
58796+ const int niceval);
58797+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58798+int gr_handle_chroot_chroot(const struct dentry *dentry,
58799+ const struct vfsmount *mnt);
58800+void gr_handle_chroot_chdir(struct path *path);
58801+int gr_handle_chroot_chmod(const struct dentry *dentry,
58802+ const struct vfsmount *mnt, const int mode);
58803+int gr_handle_chroot_mknod(const struct dentry *dentry,
58804+ const struct vfsmount *mnt, const int mode);
58805+int gr_handle_chroot_mount(const struct dentry *dentry,
58806+ const struct vfsmount *mnt,
58807+ const char *dev_name);
58808+int gr_handle_chroot_pivot(void);
58809+int gr_handle_chroot_unix(const pid_t pid);
58810+
58811+int gr_handle_rawio(const struct inode *inode);
58812+
58813+void gr_handle_ioperm(void);
58814+void gr_handle_iopl(void);
58815+
58816+int gr_tpe_allow(const struct file *file);
58817+
58818+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58819+void gr_clear_chroot_entries(struct task_struct *task);
58820+
58821+void gr_log_forkfail(const int retval);
58822+void gr_log_timechange(void);
58823+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58824+void gr_log_chdir(const struct dentry *dentry,
58825+ const struct vfsmount *mnt);
58826+void gr_log_chroot_exec(const struct dentry *dentry,
58827+ const struct vfsmount *mnt);
58828+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58829+void gr_log_remount(const char *devname, const int retval);
58830+void gr_log_unmount(const char *devname, const int retval);
58831+void gr_log_mount(const char *from, const char *to, const int retval);
58832+void gr_log_textrel(struct vm_area_struct *vma);
58833+void gr_log_rwxmmap(struct file *file);
58834+void gr_log_rwxmprotect(struct file *file);
58835+
58836+int gr_handle_follow_link(const struct inode *parent,
58837+ const struct inode *inode,
58838+ const struct dentry *dentry,
58839+ const struct vfsmount *mnt);
58840+int gr_handle_fifo(const struct dentry *dentry,
58841+ const struct vfsmount *mnt,
58842+ const struct dentry *dir, const int flag,
58843+ const int acc_mode);
58844+int gr_handle_hardlink(const struct dentry *dentry,
58845+ const struct vfsmount *mnt,
58846+ struct inode *inode,
58847+ const int mode, const char *to);
58848+
58849+int gr_is_capable(const int cap);
58850+int gr_is_capable_nolog(const int cap);
58851+void gr_learn_resource(const struct task_struct *task, const int limit,
58852+ const unsigned long wanted, const int gt);
58853+void gr_copy_label(struct task_struct *tsk);
58854+void gr_handle_crash(struct task_struct *task, const int sig);
58855+int gr_handle_signal(const struct task_struct *p, const int sig);
58856+int gr_check_crash_uid(const uid_t uid);
58857+int gr_check_protected_task(const struct task_struct *task);
58858+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58859+int gr_acl_handle_mmap(const struct file *file,
58860+ const unsigned long prot);
58861+int gr_acl_handle_mprotect(const struct file *file,
58862+ const unsigned long prot);
58863+int gr_check_hidden_task(const struct task_struct *tsk);
58864+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58865+ const struct vfsmount *mnt);
58866+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58867+ const struct vfsmount *mnt);
58868+__u32 gr_acl_handle_access(const struct dentry *dentry,
58869+ const struct vfsmount *mnt, const int fmode);
58870+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58871+ const struct vfsmount *mnt, mode_t mode);
58872+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58873+ const struct vfsmount *mnt, mode_t mode);
58874+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58875+ const struct vfsmount *mnt);
58876+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58877+ const struct vfsmount *mnt);
58878+int gr_handle_ptrace(struct task_struct *task, const long request);
58879+int gr_handle_proc_ptrace(struct task_struct *task);
58880+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58881+ const struct vfsmount *mnt);
58882+int gr_check_crash_exec(const struct file *filp);
58883+int gr_acl_is_enabled(void);
58884+void gr_set_kernel_label(struct task_struct *task);
58885+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58886+ const gid_t gid);
58887+int gr_set_proc_label(const struct dentry *dentry,
58888+ const struct vfsmount *mnt,
58889+ const int unsafe_share);
58890+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58891+ const struct vfsmount *mnt);
58892+__u32 gr_acl_handle_open(const struct dentry *dentry,
58893+ const struct vfsmount *mnt, int acc_mode);
58894+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58895+ const struct dentry *p_dentry,
58896+ const struct vfsmount *p_mnt,
58897+ int open_flags, int acc_mode, const int imode);
58898+void gr_handle_create(const struct dentry *dentry,
58899+ const struct vfsmount *mnt);
58900+void gr_handle_proc_create(const struct dentry *dentry,
58901+ const struct inode *inode);
58902+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58903+ const struct dentry *parent_dentry,
58904+ const struct vfsmount *parent_mnt,
58905+ const int mode);
58906+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58907+ const struct dentry *parent_dentry,
58908+ const struct vfsmount *parent_mnt);
58909+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58910+ const struct vfsmount *mnt);
58911+void gr_handle_delete(const ino_t ino, const dev_t dev);
58912+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58913+ const struct vfsmount *mnt);
58914+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58915+ const struct dentry *parent_dentry,
58916+ const struct vfsmount *parent_mnt,
58917+ const char *from);
58918+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58919+ const struct dentry *parent_dentry,
58920+ const struct vfsmount *parent_mnt,
58921+ const struct dentry *old_dentry,
58922+ const struct vfsmount *old_mnt, const char *to);
58923+int gr_acl_handle_rename(struct dentry *new_dentry,
58924+ struct dentry *parent_dentry,
58925+ const struct vfsmount *parent_mnt,
58926+ struct dentry *old_dentry,
58927+ struct inode *old_parent_inode,
58928+ struct vfsmount *old_mnt, const char *newname);
58929+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58930+ struct dentry *old_dentry,
58931+ struct dentry *new_dentry,
58932+ struct vfsmount *mnt, const __u8 replace);
58933+__u32 gr_check_link(const struct dentry *new_dentry,
58934+ const struct dentry *parent_dentry,
58935+ const struct vfsmount *parent_mnt,
58936+ const struct dentry *old_dentry,
58937+ const struct vfsmount *old_mnt);
58938+int gr_acl_handle_filldir(const struct file *file, const char *name,
58939+ const unsigned int namelen, const ino_t ino);
58940+
58941+__u32 gr_acl_handle_unix(const struct dentry *dentry,
58942+ const struct vfsmount *mnt);
58943+void gr_acl_handle_exit(void);
58944+void gr_acl_handle_psacct(struct task_struct *task, const long code);
58945+int gr_acl_handle_procpidmem(const struct task_struct *task);
58946+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58947+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58948+void gr_audit_ptrace(struct task_struct *task);
58949+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58950+
58951+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
58952+
58953+#ifdef CONFIG_GRKERNSEC
58954+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58955+void gr_handle_vm86(void);
58956+void gr_handle_mem_readwrite(u64 from, u64 to);
58957+
58958+extern int grsec_enable_dmesg;
58959+extern int grsec_disable_privio;
58960+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58961+extern int grsec_enable_chroot_findtask;
58962+#endif
58963+#ifdef CONFIG_GRKERNSEC_SETXID
58964+extern int grsec_enable_setxid;
58965+#endif
58966+#endif
58967+
58968+#endif
58969diff --git a/include/linux/grsock.h b/include/linux/grsock.h
58970new file mode 100644
58971index 0000000..e7ffaaf
58972--- /dev/null
58973+++ b/include/linux/grsock.h
58974@@ -0,0 +1,19 @@
58975+#ifndef __GRSOCK_H
58976+#define __GRSOCK_H
58977+
58978+extern void gr_attach_curr_ip(const struct sock *sk);
58979+extern int gr_handle_sock_all(const int family, const int type,
58980+ const int protocol);
58981+extern int gr_handle_sock_server(const struct sockaddr *sck);
58982+extern int gr_handle_sock_server_other(const struct sock *sck);
58983+extern int gr_handle_sock_client(const struct sockaddr *sck);
58984+extern int gr_search_connect(struct socket * sock,
58985+ struct sockaddr_in * addr);
58986+extern int gr_search_bind(struct socket * sock,
58987+ struct sockaddr_in * addr);
58988+extern int gr_search_listen(struct socket * sock);
58989+extern int gr_search_accept(struct socket * sock);
58990+extern int gr_search_socket(const int domain, const int type,
58991+ const int protocol);
58992+
58993+#endif
58994diff --git a/include/linux/hid.h b/include/linux/hid.h
58995index c235e4e..f0cf7a0 100644
58996--- a/include/linux/hid.h
58997+++ b/include/linux/hid.h
58998@@ -679,7 +679,7 @@ struct hid_ll_driver {
58999 unsigned int code, int value);
59000
59001 int (*parse)(struct hid_device *hdev);
59002-};
59003+} __no_const;
59004
59005 #define PM_HINT_FULLON 1<<5
59006 #define PM_HINT_NORMAL 1<<1
59007diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59008index 3a93f73..b19d0b3 100644
59009--- a/include/linux/highmem.h
59010+++ b/include/linux/highmem.h
59011@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59012 kunmap_atomic(kaddr, KM_USER0);
59013 }
59014
59015+static inline void sanitize_highpage(struct page *page)
59016+{
59017+ void *kaddr;
59018+ unsigned long flags;
59019+
59020+ local_irq_save(flags);
59021+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59022+ clear_page(kaddr);
59023+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59024+ local_irq_restore(flags);
59025+}
59026+
59027 static inline void zero_user_segments(struct page *page,
59028 unsigned start1, unsigned end1,
59029 unsigned start2, unsigned end2)
59030diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59031index 07d103a..04ec65b 100644
59032--- a/include/linux/i2c.h
59033+++ b/include/linux/i2c.h
59034@@ -364,6 +364,7 @@ struct i2c_algorithm {
59035 /* To determine what the adapter supports */
59036 u32 (*functionality) (struct i2c_adapter *);
59037 };
59038+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59039
59040 /*
59041 * i2c_adapter is the structure used to identify a physical i2c bus along
59042diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59043index a6deef4..c56a7f2 100644
59044--- a/include/linux/i2o.h
59045+++ b/include/linux/i2o.h
59046@@ -564,7 +564,7 @@ struct i2o_controller {
59047 struct i2o_device *exec; /* Executive */
59048 #if BITS_PER_LONG == 64
59049 spinlock_t context_list_lock; /* lock for context_list */
59050- atomic_t context_list_counter; /* needed for unique contexts */
59051+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59052 struct list_head context_list; /* list of context id's
59053 and pointers */
59054 #endif
59055diff --git a/include/linux/init.h b/include/linux/init.h
59056index 9146f39..885354d 100644
59057--- a/include/linux/init.h
59058+++ b/include/linux/init.h
59059@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59060
59061 /* Each module must use one module_init(). */
59062 #define module_init(initfn) \
59063- static inline initcall_t __inittest(void) \
59064+ static inline __used initcall_t __inittest(void) \
59065 { return initfn; } \
59066 int init_module(void) __attribute__((alias(#initfn)));
59067
59068 /* This is only required if you want to be unloadable. */
59069 #define module_exit(exitfn) \
59070- static inline exitcall_t __exittest(void) \
59071+ static inline __used exitcall_t __exittest(void) \
59072 { return exitfn; } \
59073 void cleanup_module(void) __attribute__((alias(#exitfn)));
59074
59075diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59076index 32574ee..00d4ef1 100644
59077--- a/include/linux/init_task.h
59078+++ b/include/linux/init_task.h
59079@@ -128,6 +128,12 @@ extern struct cred init_cred;
59080
59081 #define INIT_TASK_COMM "swapper"
59082
59083+#ifdef CONFIG_X86
59084+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59085+#else
59086+#define INIT_TASK_THREAD_INFO
59087+#endif
59088+
59089 /*
59090 * INIT_TASK is used to set up the first task table, touch at
59091 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59092@@ -166,6 +172,7 @@ extern struct cred init_cred;
59093 RCU_INIT_POINTER(.cred, &init_cred), \
59094 .comm = INIT_TASK_COMM, \
59095 .thread = INIT_THREAD, \
59096+ INIT_TASK_THREAD_INFO \
59097 .fs = &init_fs, \
59098 .files = &init_files, \
59099 .signal = &init_signals, \
59100diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59101index e6ca56d..8583707 100644
59102--- a/include/linux/intel-iommu.h
59103+++ b/include/linux/intel-iommu.h
59104@@ -296,7 +296,7 @@ struct iommu_flush {
59105 u8 fm, u64 type);
59106 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59107 unsigned int size_order, u64 type);
59108-};
59109+} __no_const;
59110
59111 enum {
59112 SR_DMAR_FECTL_REG,
59113diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59114index a64b00e..464d8bc 100644
59115--- a/include/linux/interrupt.h
59116+++ b/include/linux/interrupt.h
59117@@ -441,7 +441,7 @@ enum
59118 /* map softirq index to softirq name. update 'softirq_to_name' in
59119 * kernel/softirq.c when adding a new softirq.
59120 */
59121-extern char *softirq_to_name[NR_SOFTIRQS];
59122+extern const char * const softirq_to_name[NR_SOFTIRQS];
59123
59124 /* softirq mask and active fields moved to irq_cpustat_t in
59125 * asm/hardirq.h to get better cache usage. KAO
59126@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59127
59128 struct softirq_action
59129 {
59130- void (*action)(struct softirq_action *);
59131+ void (*action)(void);
59132 };
59133
59134 asmlinkage void do_softirq(void);
59135 asmlinkage void __do_softirq(void);
59136-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59137+extern void open_softirq(int nr, void (*action)(void));
59138 extern void softirq_init(void);
59139 static inline void __raise_softirq_irqoff(unsigned int nr)
59140 {
59141diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59142index 3875719..4cd454c 100644
59143--- a/include/linux/kallsyms.h
59144+++ b/include/linux/kallsyms.h
59145@@ -15,7 +15,8 @@
59146
59147 struct module;
59148
59149-#ifdef CONFIG_KALLSYMS
59150+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59151+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59152 /* Lookup the address for a symbol. Returns 0 if not found. */
59153 unsigned long kallsyms_lookup_name(const char *name);
59154
59155@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59156 /* Stupid that this does nothing, but I didn't create this mess. */
59157 #define __print_symbol(fmt, addr)
59158 #endif /*CONFIG_KALLSYMS*/
59159+#else /* when included by kallsyms.c, vsnprintf.c, or
59160+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59161+extern void __print_symbol(const char *fmt, unsigned long address);
59162+extern int sprint_backtrace(char *buffer, unsigned long address);
59163+extern int sprint_symbol(char *buffer, unsigned long address);
59164+const char *kallsyms_lookup(unsigned long addr,
59165+ unsigned long *symbolsize,
59166+ unsigned long *offset,
59167+ char **modname, char *namebuf);
59168+#endif
59169
59170 /* This macro allows us to keep printk typechecking */
59171 static __printf(1, 2)
59172diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59173index fa39183..40160be 100644
59174--- a/include/linux/kgdb.h
59175+++ b/include/linux/kgdb.h
59176@@ -53,7 +53,7 @@ extern int kgdb_connected;
59177 extern int kgdb_io_module_registered;
59178
59179 extern atomic_t kgdb_setting_breakpoint;
59180-extern atomic_t kgdb_cpu_doing_single_step;
59181+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59182
59183 extern struct task_struct *kgdb_usethread;
59184 extern struct task_struct *kgdb_contthread;
59185@@ -251,7 +251,7 @@ struct kgdb_arch {
59186 void (*disable_hw_break)(struct pt_regs *regs);
59187 void (*remove_all_hw_break)(void);
59188 void (*correct_hw_break)(void);
59189-};
59190+} __do_const;
59191
59192 /**
59193 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59194@@ -276,7 +276,7 @@ struct kgdb_io {
59195 void (*pre_exception) (void);
59196 void (*post_exception) (void);
59197 int is_console;
59198-};
59199+} __do_const;
59200
59201 extern struct kgdb_arch arch_kgdb_ops;
59202
59203diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59204index b16f653..eb908f4 100644
59205--- a/include/linux/kmod.h
59206+++ b/include/linux/kmod.h
59207@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59208 * usually useless though. */
59209 extern __printf(2, 3)
59210 int __request_module(bool wait, const char *name, ...);
59211+extern __printf(3, 4)
59212+int ___request_module(bool wait, char *param_name, const char *name, ...);
59213 #define request_module(mod...) __request_module(true, mod)
59214 #define request_module_nowait(mod...) __request_module(false, mod)
59215 #define try_then_request_module(x, mod...) \
59216diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59217index d526231..086e89b 100644
59218--- a/include/linux/kvm_host.h
59219+++ b/include/linux/kvm_host.h
59220@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59221 void vcpu_load(struct kvm_vcpu *vcpu);
59222 void vcpu_put(struct kvm_vcpu *vcpu);
59223
59224-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59225+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59226 struct module *module);
59227 void kvm_exit(void);
59228
59229@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59230 struct kvm_guest_debug *dbg);
59231 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59232
59233-int kvm_arch_init(void *opaque);
59234+int kvm_arch_init(const void *opaque);
59235 void kvm_arch_exit(void);
59236
59237 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59238diff --git a/include/linux/libata.h b/include/linux/libata.h
59239index cafc09a..d7e7829 100644
59240--- a/include/linux/libata.h
59241+++ b/include/linux/libata.h
59242@@ -909,7 +909,7 @@ struct ata_port_operations {
59243 * fields must be pointers.
59244 */
59245 const struct ata_port_operations *inherits;
59246-};
59247+} __do_const;
59248
59249 struct ata_port_info {
59250 unsigned long flags;
59251diff --git a/include/linux/mca.h b/include/linux/mca.h
59252index 3797270..7765ede 100644
59253--- a/include/linux/mca.h
59254+++ b/include/linux/mca.h
59255@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59256 int region);
59257 void * (*mca_transform_memory)(struct mca_device *,
59258 void *memory);
59259-};
59260+} __no_const;
59261
59262 struct mca_bus {
59263 u64 default_dma_mask;
59264diff --git a/include/linux/memory.h b/include/linux/memory.h
59265index 935699b..11042cc 100644
59266--- a/include/linux/memory.h
59267+++ b/include/linux/memory.h
59268@@ -144,7 +144,7 @@ struct memory_accessor {
59269 size_t count);
59270 ssize_t (*write)(struct memory_accessor *, const char *buf,
59271 off_t offset, size_t count);
59272-};
59273+} __no_const;
59274
59275 /*
59276 * Kernel text modification mutex, used for code patching. Users of this lock
59277diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59278index 9970337..9444122 100644
59279--- a/include/linux/mfd/abx500.h
59280+++ b/include/linux/mfd/abx500.h
59281@@ -188,6 +188,7 @@ struct abx500_ops {
59282 int (*event_registers_startup_state_get) (struct device *, u8 *);
59283 int (*startup_irq_enabled) (struct device *, unsigned int);
59284 };
59285+typedef struct abx500_ops __no_const abx500_ops_no_const;
59286
59287 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59288 void abx500_remove_ops(struct device *dev);
59289diff --git a/include/linux/mm.h b/include/linux/mm.h
59290index 4baadd1..2e0b45e 100644
59291--- a/include/linux/mm.h
59292+++ b/include/linux/mm.h
59293@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59294
59295 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59296 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59297+
59298+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59299+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59300+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59301+#else
59302 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59303+#endif
59304+
59305 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59306 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59307
59308@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59309 int set_page_dirty_lock(struct page *page);
59310 int clear_page_dirty_for_io(struct page *page);
59311
59312-/* Is the vma a continuation of the stack vma above it? */
59313-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59314-{
59315- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59316-}
59317-
59318-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59319- unsigned long addr)
59320-{
59321- return (vma->vm_flags & VM_GROWSDOWN) &&
59322- (vma->vm_start == addr) &&
59323- !vma_growsdown(vma->vm_prev, addr);
59324-}
59325-
59326-/* Is the vma a continuation of the stack vma below it? */
59327-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59328-{
59329- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59330-}
59331-
59332-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59333- unsigned long addr)
59334-{
59335- return (vma->vm_flags & VM_GROWSUP) &&
59336- (vma->vm_end == addr) &&
59337- !vma_growsup(vma->vm_next, addr);
59338-}
59339-
59340 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59341 unsigned long old_addr, struct vm_area_struct *new_vma,
59342 unsigned long new_addr, unsigned long len);
59343@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59344 }
59345 #endif
59346
59347+#ifdef CONFIG_MMU
59348+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59349+#else
59350+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59351+{
59352+ return __pgprot(0);
59353+}
59354+#endif
59355+
59356 int vma_wants_writenotify(struct vm_area_struct *vma);
59357
59358 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59359@@ -1419,6 +1407,7 @@ out:
59360 }
59361
59362 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59363+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59364
59365 extern unsigned long do_brk(unsigned long, unsigned long);
59366
59367@@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59368 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59369 struct vm_area_struct **pprev);
59370
59371+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59372+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59373+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59374+
59375 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59376 NULL if none. Assume start_addr < end_addr. */
59377 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59378@@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59379 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59380 }
59381
59382-#ifdef CONFIG_MMU
59383-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59384-#else
59385-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59386-{
59387- return __pgprot(0);
59388-}
59389-#endif
59390-
59391 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59392 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59393 unsigned long pfn, unsigned long size, pgprot_t);
59394@@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59395 extern int sysctl_memory_failure_early_kill;
59396 extern int sysctl_memory_failure_recovery;
59397 extern void shake_page(struct page *p, int access);
59398-extern atomic_long_t mce_bad_pages;
59399+extern atomic_long_unchecked_t mce_bad_pages;
59400 extern int soft_offline_page(struct page *page, int flags);
59401
59402 extern void dump_page(struct page *page);
59403@@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59404 unsigned int pages_per_huge_page);
59405 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59406
59407+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59408+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59409+#else
59410+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59411+#endif
59412+
59413 #endif /* __KERNEL__ */
59414 #endif /* _LINUX_MM_H */
59415diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59416index 5b42f1b..759e4b4 100644
59417--- a/include/linux/mm_types.h
59418+++ b/include/linux/mm_types.h
59419@@ -253,6 +253,8 @@ struct vm_area_struct {
59420 #ifdef CONFIG_NUMA
59421 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59422 #endif
59423+
59424+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59425 };
59426
59427 struct core_thread {
59428@@ -389,6 +391,24 @@ struct mm_struct {
59429 #ifdef CONFIG_CPUMASK_OFFSTACK
59430 struct cpumask cpumask_allocation;
59431 #endif
59432+
59433+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59434+ unsigned long pax_flags;
59435+#endif
59436+
59437+#ifdef CONFIG_PAX_DLRESOLVE
59438+ unsigned long call_dl_resolve;
59439+#endif
59440+
59441+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59442+ unsigned long call_syscall;
59443+#endif
59444+
59445+#ifdef CONFIG_PAX_ASLR
59446+ unsigned long delta_mmap; /* randomized offset */
59447+ unsigned long delta_stack; /* randomized offset */
59448+#endif
59449+
59450 };
59451
59452 static inline void mm_init_cpumask(struct mm_struct *mm)
59453diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59454index 1d1b1e1..2a13c78 100644
59455--- a/include/linux/mmu_notifier.h
59456+++ b/include/linux/mmu_notifier.h
59457@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59458 */
59459 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59460 ({ \
59461- pte_t __pte; \
59462+ pte_t ___pte; \
59463 struct vm_area_struct *___vma = __vma; \
59464 unsigned long ___address = __address; \
59465- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59466+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59467 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59468- __pte; \
59469+ ___pte; \
59470 })
59471
59472 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59473diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59474index 188cb2f..d78409b 100644
59475--- a/include/linux/mmzone.h
59476+++ b/include/linux/mmzone.h
59477@@ -369,7 +369,7 @@ struct zone {
59478 unsigned long flags; /* zone flags, see below */
59479
59480 /* Zone statistics */
59481- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59482+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59483
59484 /*
59485 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59486diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59487index 468819c..17b9db3 100644
59488--- a/include/linux/mod_devicetable.h
59489+++ b/include/linux/mod_devicetable.h
59490@@ -12,7 +12,7 @@
59491 typedef unsigned long kernel_ulong_t;
59492 #endif
59493
59494-#define PCI_ANY_ID (~0)
59495+#define PCI_ANY_ID ((__u16)~0)
59496
59497 struct pci_device_id {
59498 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59499@@ -131,7 +131,7 @@ struct usb_device_id {
59500 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59501 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59502
59503-#define HID_ANY_ID (~0)
59504+#define HID_ANY_ID (~0U)
59505
59506 struct hid_device_id {
59507 __u16 bus;
59508diff --git a/include/linux/module.h b/include/linux/module.h
59509index 3cb7839..511cb87 100644
59510--- a/include/linux/module.h
59511+++ b/include/linux/module.h
59512@@ -17,6 +17,7 @@
59513 #include <linux/moduleparam.h>
59514 #include <linux/tracepoint.h>
59515 #include <linux/export.h>
59516+#include <linux/fs.h>
59517
59518 #include <linux/percpu.h>
59519 #include <asm/module.h>
59520@@ -261,19 +262,16 @@ struct module
59521 int (*init)(void);
59522
59523 /* If this is non-NULL, vfree after init() returns */
59524- void *module_init;
59525+ void *module_init_rx, *module_init_rw;
59526
59527 /* Here is the actual code + data, vfree'd on unload. */
59528- void *module_core;
59529+ void *module_core_rx, *module_core_rw;
59530
59531 /* Here are the sizes of the init and core sections */
59532- unsigned int init_size, core_size;
59533+ unsigned int init_size_rw, core_size_rw;
59534
59535 /* The size of the executable code in each section. */
59536- unsigned int init_text_size, core_text_size;
59537-
59538- /* Size of RO sections of the module (text+rodata) */
59539- unsigned int init_ro_size, core_ro_size;
59540+ unsigned int init_size_rx, core_size_rx;
59541
59542 /* Arch-specific module values */
59543 struct mod_arch_specific arch;
59544@@ -329,6 +327,10 @@ struct module
59545 #ifdef CONFIG_EVENT_TRACING
59546 struct ftrace_event_call **trace_events;
59547 unsigned int num_trace_events;
59548+ struct file_operations trace_id;
59549+ struct file_operations trace_enable;
59550+ struct file_operations trace_format;
59551+ struct file_operations trace_filter;
59552 #endif
59553 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59554 unsigned int num_ftrace_callsites;
59555@@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
59556 bool is_module_percpu_address(unsigned long addr);
59557 bool is_module_text_address(unsigned long addr);
59558
59559+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59560+{
59561+
59562+#ifdef CONFIG_PAX_KERNEXEC
59563+ if (ktla_ktva(addr) >= (unsigned long)start &&
59564+ ktla_ktva(addr) < (unsigned long)start + size)
59565+ return 1;
59566+#endif
59567+
59568+ return ((void *)addr >= start && (void *)addr < start + size);
59569+}
59570+
59571+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59572+{
59573+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59574+}
59575+
59576+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59577+{
59578+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59579+}
59580+
59581+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59582+{
59583+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59584+}
59585+
59586+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59587+{
59588+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59589+}
59590+
59591 static inline int within_module_core(unsigned long addr, struct module *mod)
59592 {
59593- return (unsigned long)mod->module_core <= addr &&
59594- addr < (unsigned long)mod->module_core + mod->core_size;
59595+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59596 }
59597
59598 static inline int within_module_init(unsigned long addr, struct module *mod)
59599 {
59600- return (unsigned long)mod->module_init <= addr &&
59601- addr < (unsigned long)mod->module_init + mod->init_size;
59602+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59603 }
59604
59605 /* Search for module by name: must hold module_mutex. */
59606diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
59607index b2be02e..6a9fdb1 100644
59608--- a/include/linux/moduleloader.h
59609+++ b/include/linux/moduleloader.h
59610@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
59611 sections. Returns NULL on failure. */
59612 void *module_alloc(unsigned long size);
59613
59614+#ifdef CONFIG_PAX_KERNEXEC
59615+void *module_alloc_exec(unsigned long size);
59616+#else
59617+#define module_alloc_exec(x) module_alloc(x)
59618+#endif
59619+
59620 /* Free memory returned from module_alloc. */
59621 void module_free(struct module *mod, void *module_region);
59622
59623+#ifdef CONFIG_PAX_KERNEXEC
59624+void module_free_exec(struct module *mod, void *module_region);
59625+#else
59626+#define module_free_exec(x, y) module_free((x), (y))
59627+#endif
59628+
59629 /* Apply the given relocation to the (simplified) ELF. Return -error
59630 or 0. */
59631 int apply_relocate(Elf_Shdr *sechdrs,
59632diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
59633index 7939f63..ec6df57 100644
59634--- a/include/linux/moduleparam.h
59635+++ b/include/linux/moduleparam.h
59636@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
59637 * @len is usually just sizeof(string).
59638 */
59639 #define module_param_string(name, string, len, perm) \
59640- static const struct kparam_string __param_string_##name \
59641+ static const struct kparam_string __param_string_##name __used \
59642 = { len, string }; \
59643 __module_param_call(MODULE_PARAM_PREFIX, name, \
59644 &param_ops_string, \
59645@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
59646 * module_param_named() for why this might be necessary.
59647 */
59648 #define module_param_array_named(name, array, type, nump, perm) \
59649- static const struct kparam_array __param_arr_##name \
59650+ static const struct kparam_array __param_arr_##name __used \
59651 = { .max = ARRAY_SIZE(array), .num = nump, \
59652 .ops = &param_ops_##type, \
59653 .elemsize = sizeof(array[0]), .elem = array }; \
59654diff --git a/include/linux/namei.h b/include/linux/namei.h
59655index ffc0213..2c1f2cb 100644
59656--- a/include/linux/namei.h
59657+++ b/include/linux/namei.h
59658@@ -24,7 +24,7 @@ struct nameidata {
59659 unsigned seq;
59660 int last_type;
59661 unsigned depth;
59662- char *saved_names[MAX_NESTED_LINKS + 1];
59663+ const char *saved_names[MAX_NESTED_LINKS + 1];
59664
59665 /* Intent data */
59666 union {
59667@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
59668 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59669 extern void unlock_rename(struct dentry *, struct dentry *);
59670
59671-static inline void nd_set_link(struct nameidata *nd, char *path)
59672+static inline void nd_set_link(struct nameidata *nd, const char *path)
59673 {
59674 nd->saved_names[nd->depth] = path;
59675 }
59676
59677-static inline char *nd_get_link(struct nameidata *nd)
59678+static inline const char *nd_get_link(const struct nameidata *nd)
59679 {
59680 return nd->saved_names[nd->depth];
59681 }
59682diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
59683index a82ad4d..be68b4b 100644
59684--- a/include/linux/netdevice.h
59685+++ b/include/linux/netdevice.h
59686@@ -949,6 +949,7 @@ struct net_device_ops {
59687 int (*ndo_set_features)(struct net_device *dev,
59688 u32 features);
59689 };
59690+typedef struct net_device_ops __no_const net_device_ops_no_const;
59691
59692 /*
59693 * The DEVICE structure.
59694diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
59695new file mode 100644
59696index 0000000..33f4af8
59697--- /dev/null
59698+++ b/include/linux/netfilter/xt_gradm.h
59699@@ -0,0 +1,9 @@
59700+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59701+#define _LINUX_NETFILTER_XT_GRADM_H 1
59702+
59703+struct xt_gradm_mtinfo {
59704+ __u16 flags;
59705+ __u16 invflags;
59706+};
59707+
59708+#endif
59709diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
59710index c65a18a..0c05f3a 100644
59711--- a/include/linux/of_pdt.h
59712+++ b/include/linux/of_pdt.h
59713@@ -32,7 +32,7 @@ struct of_pdt_ops {
59714
59715 /* return 0 on success; fill in 'len' with number of bytes in path */
59716 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59717-};
59718+} __no_const;
59719
59720 extern void *prom_early_alloc(unsigned long size);
59721
59722diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
59723index a4c5624..79d6d88 100644
59724--- a/include/linux/oprofile.h
59725+++ b/include/linux/oprofile.h
59726@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
59727 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59728 char const * name, ulong * val);
59729
59730-/** Create a file for read-only access to an atomic_t. */
59731+/** Create a file for read-only access to an atomic_unchecked_t. */
59732 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59733- char const * name, atomic_t * val);
59734+ char const * name, atomic_unchecked_t * val);
59735
59736 /** create a directory */
59737 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59738diff --git a/include/linux/padata.h b/include/linux/padata.h
59739index 4633b2f..988bc08 100644
59740--- a/include/linux/padata.h
59741+++ b/include/linux/padata.h
59742@@ -129,7 +129,7 @@ struct parallel_data {
59743 struct padata_instance *pinst;
59744 struct padata_parallel_queue __percpu *pqueue;
59745 struct padata_serial_queue __percpu *squeue;
59746- atomic_t seq_nr;
59747+ atomic_unchecked_t seq_nr;
59748 atomic_t reorder_objects;
59749 atomic_t refcnt;
59750 unsigned int max_seq_nr;
59751diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
59752index b1f8912..c955bff 100644
59753--- a/include/linux/perf_event.h
59754+++ b/include/linux/perf_event.h
59755@@ -748,8 +748,8 @@ struct perf_event {
59756
59757 enum perf_event_active_state state;
59758 unsigned int attach_state;
59759- local64_t count;
59760- atomic64_t child_count;
59761+ local64_t count; /* PaX: fix it one day */
59762+ atomic64_unchecked_t child_count;
59763
59764 /*
59765 * These are the total time in nanoseconds that the event
59766@@ -800,8 +800,8 @@ struct perf_event {
59767 * These accumulate total time (in nanoseconds) that children
59768 * events have been enabled and running, respectively.
59769 */
59770- atomic64_t child_total_time_enabled;
59771- atomic64_t child_total_time_running;
59772+ atomic64_unchecked_t child_total_time_enabled;
59773+ atomic64_unchecked_t child_total_time_running;
59774
59775 /*
59776 * Protect attach/detach and child_list:
59777diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
59778index 77257c9..51d473a 100644
59779--- a/include/linux/pipe_fs_i.h
59780+++ b/include/linux/pipe_fs_i.h
59781@@ -46,9 +46,9 @@ struct pipe_buffer {
59782 struct pipe_inode_info {
59783 wait_queue_head_t wait;
59784 unsigned int nrbufs, curbuf, buffers;
59785- unsigned int readers;
59786- unsigned int writers;
59787- unsigned int waiting_writers;
59788+ atomic_t readers;
59789+ atomic_t writers;
59790+ atomic_t waiting_writers;
59791 unsigned int r_counter;
59792 unsigned int w_counter;
59793 struct page *tmp_page;
59794diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
59795index d3085e7..fd01052 100644
59796--- a/include/linux/pm_runtime.h
59797+++ b/include/linux/pm_runtime.h
59798@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
59799
59800 static inline void pm_runtime_mark_last_busy(struct device *dev)
59801 {
59802- ACCESS_ONCE(dev->power.last_busy) = jiffies;
59803+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59804 }
59805
59806 #else /* !CONFIG_PM_RUNTIME */
59807diff --git a/include/linux/poison.h b/include/linux/poison.h
59808index 79159de..f1233a9 100644
59809--- a/include/linux/poison.h
59810+++ b/include/linux/poison.h
59811@@ -19,8 +19,8 @@
59812 * under normal circumstances, used to verify that nobody uses
59813 * non-initialized list entries.
59814 */
59815-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59816-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59817+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59818+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59819
59820 /********** include/linux/timer.h **********/
59821 /*
59822diff --git a/include/linux/preempt.h b/include/linux/preempt.h
59823index 58969b2..ead129b 100644
59824--- a/include/linux/preempt.h
59825+++ b/include/linux/preempt.h
59826@@ -123,7 +123,7 @@ struct preempt_ops {
59827 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59828 void (*sched_out)(struct preempt_notifier *notifier,
59829 struct task_struct *next);
59830-};
59831+} __no_const;
59832
59833 /**
59834 * preempt_notifier - key for installing preemption notifiers
59835diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
59836index 643b96c..ef55a9c 100644
59837--- a/include/linux/proc_fs.h
59838+++ b/include/linux/proc_fs.h
59839@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
59840 return proc_create_data(name, mode, parent, proc_fops, NULL);
59841 }
59842
59843+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59844+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59845+{
59846+#ifdef CONFIG_GRKERNSEC_PROC_USER
59847+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59848+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59849+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59850+#else
59851+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59852+#endif
59853+}
59854+
59855+
59856 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59857 mode_t mode, struct proc_dir_entry *base,
59858 read_proc_t *read_proc, void * data)
59859@@ -258,7 +271,7 @@ union proc_op {
59860 int (*proc_show)(struct seq_file *m,
59861 struct pid_namespace *ns, struct pid *pid,
59862 struct task_struct *task);
59863-};
59864+} __no_const;
59865
59866 struct ctl_table_header;
59867 struct ctl_table;
59868diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
59869index 800f113..e9ee2e3 100644
59870--- a/include/linux/ptrace.h
59871+++ b/include/linux/ptrace.h
59872@@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
59873 extern void exit_ptrace(struct task_struct *tracer);
59874 #define PTRACE_MODE_READ 1
59875 #define PTRACE_MODE_ATTACH 2
59876-/* Returns 0 on success, -errno on denial. */
59877-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59878 /* Returns true on success, false on denial. */
59879 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59880+/* Returns true on success, false on denial. */
59881+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59882+/* Returns true on success, false on denial. */
59883+extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
59884
59885 static inline int ptrace_reparented(struct task_struct *child)
59886 {
59887diff --git a/include/linux/random.h b/include/linux/random.h
59888index 8f74538..02a1012 100644
59889--- a/include/linux/random.h
59890+++ b/include/linux/random.h
59891@@ -69,12 +69,17 @@ void srandom32(u32 seed);
59892
59893 u32 prandom32(struct rnd_state *);
59894
59895+static inline unsigned long pax_get_random_long(void)
59896+{
59897+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59898+}
59899+
59900 /*
59901 * Handle minimum values for seeds
59902 */
59903 static inline u32 __seed(u32 x, u32 m)
59904 {
59905- return (x < m) ? x + m : x;
59906+ return (x <= m) ? x + m + 1 : x;
59907 }
59908
59909 /**
59910diff --git a/include/linux/reboot.h b/include/linux/reboot.h
59911index e0879a7..a12f962 100644
59912--- a/include/linux/reboot.h
59913+++ b/include/linux/reboot.h
59914@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
59915 * Architecture-specific implementations of sys_reboot commands.
59916 */
59917
59918-extern void machine_restart(char *cmd);
59919-extern void machine_halt(void);
59920-extern void machine_power_off(void);
59921+extern void machine_restart(char *cmd) __noreturn;
59922+extern void machine_halt(void) __noreturn;
59923+extern void machine_power_off(void) __noreturn;
59924
59925 extern void machine_shutdown(void);
59926 struct pt_regs;
59927@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
59928 */
59929
59930 extern void kernel_restart_prepare(char *cmd);
59931-extern void kernel_restart(char *cmd);
59932-extern void kernel_halt(void);
59933-extern void kernel_power_off(void);
59934+extern void kernel_restart(char *cmd) __noreturn;
59935+extern void kernel_halt(void) __noreturn;
59936+extern void kernel_power_off(void) __noreturn;
59937
59938 extern int C_A_D; /* for sysctl */
59939 void ctrl_alt_del(void);
59940@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
59941 * Emergency restart, callable from an interrupt handler.
59942 */
59943
59944-extern void emergency_restart(void);
59945+extern void emergency_restart(void) __noreturn;
59946 #include <asm/emergency-restart.h>
59947
59948 #endif
59949diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
59950index 96d465f..b084e05 100644
59951--- a/include/linux/reiserfs_fs.h
59952+++ b/include/linux/reiserfs_fs.h
59953@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
59954 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59955
59956 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59957-#define get_generation(s) atomic_read (&fs_generation(s))
59958+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59959 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59960 #define __fs_changed(gen,s) (gen != get_generation (s))
59961 #define fs_changed(gen,s) \
59962diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
59963index 52c83b6..18ed7eb 100644
59964--- a/include/linux/reiserfs_fs_sb.h
59965+++ b/include/linux/reiserfs_fs_sb.h
59966@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
59967 /* Comment? -Hans */
59968 wait_queue_head_t s_wait;
59969 /* To be obsoleted soon by per buffer seals.. -Hans */
59970- atomic_t s_generation_counter; // increased by one every time the
59971+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59972 // tree gets re-balanced
59973 unsigned long s_properties; /* File system properties. Currently holds
59974 on-disk FS format */
59975diff --git a/include/linux/relay.h b/include/linux/relay.h
59976index 14a86bc..17d0700 100644
59977--- a/include/linux/relay.h
59978+++ b/include/linux/relay.h
59979@@ -159,7 +159,7 @@ struct rchan_callbacks
59980 * The callback should return 0 if successful, negative if not.
59981 */
59982 int (*remove_buf_file)(struct dentry *dentry);
59983-};
59984+} __no_const;
59985
59986 /*
59987 * CONFIG_RELAY kernel API, kernel/relay.c
59988diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
59989index c6c6084..5bf1212 100644
59990--- a/include/linux/rfkill.h
59991+++ b/include/linux/rfkill.h
59992@@ -147,6 +147,7 @@ struct rfkill_ops {
59993 void (*query)(struct rfkill *rfkill, void *data);
59994 int (*set_block)(void *data, bool blocked);
59995 };
59996+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59997
59998 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59999 /**
60000diff --git a/include/linux/rio.h b/include/linux/rio.h
60001index 4d50611..c6858a2 100644
60002--- a/include/linux/rio.h
60003+++ b/include/linux/rio.h
60004@@ -315,7 +315,7 @@ struct rio_ops {
60005 int mbox, void *buffer, size_t len);
60006 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60007 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60008-};
60009+} __no_const;
60010
60011 #define RIO_RESOURCE_MEM 0x00000100
60012 #define RIO_RESOURCE_DOORBELL 0x00000200
60013diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60014index 2148b12..519b820 100644
60015--- a/include/linux/rmap.h
60016+++ b/include/linux/rmap.h
60017@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60018 void anon_vma_init(void); /* create anon_vma_cachep */
60019 int anon_vma_prepare(struct vm_area_struct *);
60020 void unlink_anon_vmas(struct vm_area_struct *);
60021-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60022-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60023+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60024+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60025 void __anon_vma_link(struct vm_area_struct *);
60026
60027 static inline void anon_vma_merge(struct vm_area_struct *vma,
60028diff --git a/include/linux/sched.h b/include/linux/sched.h
60029index 1c4f3e9..e96dced 100644
60030--- a/include/linux/sched.h
60031+++ b/include/linux/sched.h
60032@@ -101,6 +101,7 @@ struct bio_list;
60033 struct fs_struct;
60034 struct perf_event_context;
60035 struct blk_plug;
60036+struct linux_binprm;
60037
60038 /*
60039 * List of flags we want to share for kernel threads,
60040@@ -380,10 +381,13 @@ struct user_namespace;
60041 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60042
60043 extern int sysctl_max_map_count;
60044+extern unsigned long sysctl_heap_stack_gap;
60045
60046 #include <linux/aio.h>
60047
60048 #ifdef CONFIG_MMU
60049+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60050+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60051 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60052 extern unsigned long
60053 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60054@@ -629,6 +633,17 @@ struct signal_struct {
60055 #ifdef CONFIG_TASKSTATS
60056 struct taskstats *stats;
60057 #endif
60058+
60059+#ifdef CONFIG_GRKERNSEC
60060+ u32 curr_ip;
60061+ u32 saved_ip;
60062+ u32 gr_saddr;
60063+ u32 gr_daddr;
60064+ u16 gr_sport;
60065+ u16 gr_dport;
60066+ u8 used_accept:1;
60067+#endif
60068+
60069 #ifdef CONFIG_AUDIT
60070 unsigned audit_tty;
60071 struct tty_audit_buf *tty_audit_buf;
60072@@ -710,6 +725,11 @@ struct user_struct {
60073 struct key *session_keyring; /* UID's default session keyring */
60074 #endif
60075
60076+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60077+ unsigned int banned;
60078+ unsigned long ban_expires;
60079+#endif
60080+
60081 /* Hash table maintenance information */
60082 struct hlist_node uidhash_node;
60083 uid_t uid;
60084@@ -1337,8 +1357,8 @@ struct task_struct {
60085 struct list_head thread_group;
60086
60087 struct completion *vfork_done; /* for vfork() */
60088- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60089- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60090+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60091+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60092
60093 cputime_t utime, stime, utimescaled, stimescaled;
60094 cputime_t gtime;
60095@@ -1354,13 +1374,6 @@ struct task_struct {
60096 struct task_cputime cputime_expires;
60097 struct list_head cpu_timers[3];
60098
60099-/* process credentials */
60100- const struct cred __rcu *real_cred; /* objective and real subjective task
60101- * credentials (COW) */
60102- const struct cred __rcu *cred; /* effective (overridable) subjective task
60103- * credentials (COW) */
60104- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60105-
60106 char comm[TASK_COMM_LEN]; /* executable name excluding path
60107 - access with [gs]et_task_comm (which lock
60108 it with task_lock())
60109@@ -1377,8 +1390,16 @@ struct task_struct {
60110 #endif
60111 /* CPU-specific state of this task */
60112 struct thread_struct thread;
60113+/* thread_info moved to task_struct */
60114+#ifdef CONFIG_X86
60115+ struct thread_info tinfo;
60116+#endif
60117 /* filesystem information */
60118 struct fs_struct *fs;
60119+
60120+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60121+ * credentials (COW) */
60122+
60123 /* open file information */
60124 struct files_struct *files;
60125 /* namespaces */
60126@@ -1425,6 +1446,11 @@ struct task_struct {
60127 struct rt_mutex_waiter *pi_blocked_on;
60128 #endif
60129
60130+/* process credentials */
60131+ const struct cred __rcu *real_cred; /* objective and real subjective task
60132+ * credentials (COW) */
60133+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60134+
60135 #ifdef CONFIG_DEBUG_MUTEXES
60136 /* mutex deadlock detection */
60137 struct mutex_waiter *blocked_on;
60138@@ -1540,6 +1566,22 @@ struct task_struct {
60139 unsigned long default_timer_slack_ns;
60140
60141 struct list_head *scm_work_list;
60142+
60143+#ifdef CONFIG_GRKERNSEC
60144+ /* grsecurity */
60145+ const struct cred *delayed_cred;
60146+ struct dentry *gr_chroot_dentry;
60147+ struct acl_subject_label *acl;
60148+ struct acl_role_label *role;
60149+ struct file *exec_file;
60150+ u16 acl_role_id;
60151+ /* is this the task that authenticated to the special role */
60152+ u8 acl_sp_role;
60153+ u8 is_writable;
60154+ u8 brute;
60155+ u8 gr_is_chrooted;
60156+#endif
60157+
60158 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60159 /* Index of current stored address in ret_stack */
60160 int curr_ret_stack;
60161@@ -1574,6 +1616,51 @@ struct task_struct {
60162 #endif
60163 };
60164
60165+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60166+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60167+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60168+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60169+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60170+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60171+
60172+#ifdef CONFIG_PAX_SOFTMODE
60173+extern int pax_softmode;
60174+#endif
60175+
60176+extern int pax_check_flags(unsigned long *);
60177+
60178+/* if tsk != current then task_lock must be held on it */
60179+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60180+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60181+{
60182+ if (likely(tsk->mm))
60183+ return tsk->mm->pax_flags;
60184+ else
60185+ return 0UL;
60186+}
60187+
60188+/* if tsk != current then task_lock must be held on it */
60189+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60190+{
60191+ if (likely(tsk->mm)) {
60192+ tsk->mm->pax_flags = flags;
60193+ return 0;
60194+ }
60195+ return -EINVAL;
60196+}
60197+#endif
60198+
60199+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60200+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60201+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60202+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60203+#endif
60204+
60205+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60206+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60207+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60208+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60209+
60210 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60211 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60212
60213@@ -2081,7 +2168,9 @@ void yield(void);
60214 extern struct exec_domain default_exec_domain;
60215
60216 union thread_union {
60217+#ifndef CONFIG_X86
60218 struct thread_info thread_info;
60219+#endif
60220 unsigned long stack[THREAD_SIZE/sizeof(long)];
60221 };
60222
60223@@ -2114,6 +2203,7 @@ extern struct pid_namespace init_pid_ns;
60224 */
60225
60226 extern struct task_struct *find_task_by_vpid(pid_t nr);
60227+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60228 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60229 struct pid_namespace *ns);
60230
60231@@ -2251,7 +2341,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60232 extern void exit_itimers(struct signal_struct *);
60233 extern void flush_itimer_signals(void);
60234
60235-extern NORET_TYPE void do_group_exit(int);
60236+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60237
60238 extern void daemonize(const char *, ...);
60239 extern int allow_signal(int);
60240@@ -2416,13 +2506,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60241
60242 #endif
60243
60244-static inline int object_is_on_stack(void *obj)
60245+static inline int object_starts_on_stack(void *obj)
60246 {
60247- void *stack = task_stack_page(current);
60248+ const void *stack = task_stack_page(current);
60249
60250 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60251 }
60252
60253+#ifdef CONFIG_PAX_USERCOPY
60254+extern int object_is_on_stack(const void *obj, unsigned long len);
60255+#endif
60256+
60257 extern void thread_info_cache_init(void);
60258
60259 #ifdef CONFIG_DEBUG_STACK_USAGE
60260diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60261index 899fbb4..1cb4138 100644
60262--- a/include/linux/screen_info.h
60263+++ b/include/linux/screen_info.h
60264@@ -43,7 +43,8 @@ struct screen_info {
60265 __u16 pages; /* 0x32 */
60266 __u16 vesa_attributes; /* 0x34 */
60267 __u32 capabilities; /* 0x36 */
60268- __u8 _reserved[6]; /* 0x3a */
60269+ __u16 vesapm_size; /* 0x3a */
60270+ __u8 _reserved[4]; /* 0x3c */
60271 } __attribute__((packed));
60272
60273 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60274diff --git a/include/linux/security.h b/include/linux/security.h
60275index e8c619d..e0cbd1c 100644
60276--- a/include/linux/security.h
60277+++ b/include/linux/security.h
60278@@ -37,6 +37,7 @@
60279 #include <linux/xfrm.h>
60280 #include <linux/slab.h>
60281 #include <linux/xattr.h>
60282+#include <linux/grsecurity.h>
60283 #include <net/flow.h>
60284
60285 /* Maximum number of letters for an LSM name string */
60286diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60287index 0b69a46..e9e5538 100644
60288--- a/include/linux/seq_file.h
60289+++ b/include/linux/seq_file.h
60290@@ -33,6 +33,7 @@ struct seq_operations {
60291 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60292 int (*show) (struct seq_file *m, void *v);
60293 };
60294+typedef struct seq_operations __no_const seq_operations_no_const;
60295
60296 #define SEQ_SKIP 1
60297
60298diff --git a/include/linux/shm.h b/include/linux/shm.h
60299index 92808b8..c28cac4 100644
60300--- a/include/linux/shm.h
60301+++ b/include/linux/shm.h
60302@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60303
60304 /* The task created the shm object. NULL if the task is dead. */
60305 struct task_struct *shm_creator;
60306+#ifdef CONFIG_GRKERNSEC
60307+ time_t shm_createtime;
60308+ pid_t shm_lapid;
60309+#endif
60310 };
60311
60312 /* shm_mode upper byte flags */
60313diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60314index fe86488..1563c1c 100644
60315--- a/include/linux/skbuff.h
60316+++ b/include/linux/skbuff.h
60317@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60318 */
60319 static inline int skb_queue_empty(const struct sk_buff_head *list)
60320 {
60321- return list->next == (struct sk_buff *)list;
60322+ return list->next == (const struct sk_buff *)list;
60323 }
60324
60325 /**
60326@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60327 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60328 const struct sk_buff *skb)
60329 {
60330- return skb->next == (struct sk_buff *)list;
60331+ return skb->next == (const struct sk_buff *)list;
60332 }
60333
60334 /**
60335@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60336 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60337 const struct sk_buff *skb)
60338 {
60339- return skb->prev == (struct sk_buff *)list;
60340+ return skb->prev == (const struct sk_buff *)list;
60341 }
60342
60343 /**
60344@@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60345 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60346 */
60347 #ifndef NET_SKB_PAD
60348-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60349+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60350 #endif
60351
60352 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60353diff --git a/include/linux/slab.h b/include/linux/slab.h
60354index 573c809..e84c132 100644
60355--- a/include/linux/slab.h
60356+++ b/include/linux/slab.h
60357@@ -11,12 +11,20 @@
60358
60359 #include <linux/gfp.h>
60360 #include <linux/types.h>
60361+#include <linux/err.h>
60362
60363 /*
60364 * Flags to pass to kmem_cache_create().
60365 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60366 */
60367 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60368+
60369+#ifdef CONFIG_PAX_USERCOPY
60370+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60371+#else
60372+#define SLAB_USERCOPY 0x00000000UL
60373+#endif
60374+
60375 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60376 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60377 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60378@@ -87,10 +95,13 @@
60379 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60380 * Both make kfree a no-op.
60381 */
60382-#define ZERO_SIZE_PTR ((void *)16)
60383+#define ZERO_SIZE_PTR \
60384+({ \
60385+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60386+ (void *)(-MAX_ERRNO-1L); \
60387+})
60388
60389-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60390- (unsigned long)ZERO_SIZE_PTR)
60391+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60392
60393 /*
60394 * struct kmem_cache related prototypes
60395@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60396 void kfree(const void *);
60397 void kzfree(const void *);
60398 size_t ksize(const void *);
60399+void check_object_size(const void *ptr, unsigned long n, bool to);
60400
60401 /*
60402 * Allocator specific definitions. These are mainly used to establish optimized
60403@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60404
60405 void __init kmem_cache_init_late(void);
60406
60407+#define kmalloc(x, y) \
60408+({ \
60409+ void *___retval; \
60410+ intoverflow_t ___x = (intoverflow_t)x; \
60411+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60412+ ___retval = NULL; \
60413+ else \
60414+ ___retval = kmalloc((size_t)___x, (y)); \
60415+ ___retval; \
60416+})
60417+
60418+#define kmalloc_node(x, y, z) \
60419+({ \
60420+ void *___retval; \
60421+ intoverflow_t ___x = (intoverflow_t)x; \
60422+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60423+ ___retval = NULL; \
60424+ else \
60425+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60426+ ___retval; \
60427+})
60428+
60429+#define kzalloc(x, y) \
60430+({ \
60431+ void *___retval; \
60432+ intoverflow_t ___x = (intoverflow_t)x; \
60433+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60434+ ___retval = NULL; \
60435+ else \
60436+ ___retval = kzalloc((size_t)___x, (y)); \
60437+ ___retval; \
60438+})
60439+
60440+#define __krealloc(x, y, z) \
60441+({ \
60442+ void *___retval; \
60443+ intoverflow_t ___y = (intoverflow_t)y; \
60444+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60445+ ___retval = NULL; \
60446+ else \
60447+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60448+ ___retval; \
60449+})
60450+
60451+#define krealloc(x, y, z) \
60452+({ \
60453+ void *___retval; \
60454+ intoverflow_t ___y = (intoverflow_t)y; \
60455+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60456+ ___retval = NULL; \
60457+ else \
60458+ ___retval = krealloc((x), (size_t)___y, (z)); \
60459+ ___retval; \
60460+})
60461+
60462 #endif /* _LINUX_SLAB_H */
60463diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60464index d00e0ba..1b3bf7b 100644
60465--- a/include/linux/slab_def.h
60466+++ b/include/linux/slab_def.h
60467@@ -68,10 +68,10 @@ struct kmem_cache {
60468 unsigned long node_allocs;
60469 unsigned long node_frees;
60470 unsigned long node_overflow;
60471- atomic_t allochit;
60472- atomic_t allocmiss;
60473- atomic_t freehit;
60474- atomic_t freemiss;
60475+ atomic_unchecked_t allochit;
60476+ atomic_unchecked_t allocmiss;
60477+ atomic_unchecked_t freehit;
60478+ atomic_unchecked_t freemiss;
60479
60480 /*
60481 * If debugging is enabled, then the allocator can add additional
60482diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60483index a32bcfd..53b71f4 100644
60484--- a/include/linux/slub_def.h
60485+++ b/include/linux/slub_def.h
60486@@ -89,7 +89,7 @@ struct kmem_cache {
60487 struct kmem_cache_order_objects max;
60488 struct kmem_cache_order_objects min;
60489 gfp_t allocflags; /* gfp flags to use on each alloc */
60490- int refcount; /* Refcount for slab cache destroy */
60491+ atomic_t refcount; /* Refcount for slab cache destroy */
60492 void (*ctor)(void *);
60493 int inuse; /* Offset to metadata */
60494 int align; /* Alignment */
60495@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60496 }
60497
60498 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60499-void *__kmalloc(size_t size, gfp_t flags);
60500+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60501
60502 static __always_inline void *
60503 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60504diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60505index de8832d..0147b46 100644
60506--- a/include/linux/sonet.h
60507+++ b/include/linux/sonet.h
60508@@ -61,7 +61,7 @@ struct sonet_stats {
60509 #include <linux/atomic.h>
60510
60511 struct k_sonet_stats {
60512-#define __HANDLE_ITEM(i) atomic_t i
60513+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60514 __SONET_ITEMS
60515 #undef __HANDLE_ITEM
60516 };
60517diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60518index 3d8f9c4..69f1c0a 100644
60519--- a/include/linux/sunrpc/clnt.h
60520+++ b/include/linux/sunrpc/clnt.h
60521@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60522 {
60523 switch (sap->sa_family) {
60524 case AF_INET:
60525- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60526+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60527 case AF_INET6:
60528- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60529+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60530 }
60531 return 0;
60532 }
60533@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
60534 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60535 const struct sockaddr *src)
60536 {
60537- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60538+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60539 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60540
60541 dsin->sin_family = ssin->sin_family;
60542@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
60543 if (sa->sa_family != AF_INET6)
60544 return 0;
60545
60546- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60547+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60548 }
60549
60550 #endif /* __KERNEL__ */
60551diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
60552index e775689..9e206d9 100644
60553--- a/include/linux/sunrpc/sched.h
60554+++ b/include/linux/sunrpc/sched.h
60555@@ -105,6 +105,7 @@ struct rpc_call_ops {
60556 void (*rpc_call_done)(struct rpc_task *, void *);
60557 void (*rpc_release)(void *);
60558 };
60559+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60560
60561 struct rpc_task_setup {
60562 struct rpc_task *task;
60563diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
60564index c14fe86..393245e 100644
60565--- a/include/linux/sunrpc/svc_rdma.h
60566+++ b/include/linux/sunrpc/svc_rdma.h
60567@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60568 extern unsigned int svcrdma_max_requests;
60569 extern unsigned int svcrdma_max_req_size;
60570
60571-extern atomic_t rdma_stat_recv;
60572-extern atomic_t rdma_stat_read;
60573-extern atomic_t rdma_stat_write;
60574-extern atomic_t rdma_stat_sq_starve;
60575-extern atomic_t rdma_stat_rq_starve;
60576-extern atomic_t rdma_stat_rq_poll;
60577-extern atomic_t rdma_stat_rq_prod;
60578-extern atomic_t rdma_stat_sq_poll;
60579-extern atomic_t rdma_stat_sq_prod;
60580+extern atomic_unchecked_t rdma_stat_recv;
60581+extern atomic_unchecked_t rdma_stat_read;
60582+extern atomic_unchecked_t rdma_stat_write;
60583+extern atomic_unchecked_t rdma_stat_sq_starve;
60584+extern atomic_unchecked_t rdma_stat_rq_starve;
60585+extern atomic_unchecked_t rdma_stat_rq_poll;
60586+extern atomic_unchecked_t rdma_stat_rq_prod;
60587+extern atomic_unchecked_t rdma_stat_sq_poll;
60588+extern atomic_unchecked_t rdma_stat_sq_prod;
60589
60590 #define RPCRDMA_VERSION 1
60591
60592diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
60593index 703cfa3..0b8ca72ac 100644
60594--- a/include/linux/sysctl.h
60595+++ b/include/linux/sysctl.h
60596@@ -155,7 +155,11 @@ enum
60597 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60598 };
60599
60600-
60601+#ifdef CONFIG_PAX_SOFTMODE
60602+enum {
60603+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60604+};
60605+#endif
60606
60607 /* CTL_VM names: */
60608 enum
60609@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
60610
60611 extern int proc_dostring(struct ctl_table *, int,
60612 void __user *, size_t *, loff_t *);
60613+extern int proc_dostring_modpriv(struct ctl_table *, int,
60614+ void __user *, size_t *, loff_t *);
60615 extern int proc_dointvec(struct ctl_table *, int,
60616 void __user *, size_t *, loff_t *);
60617 extern int proc_dointvec_minmax(struct ctl_table *, int,
60618diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
60619index ff7dc08..893e1bd 100644
60620--- a/include/linux/tty_ldisc.h
60621+++ b/include/linux/tty_ldisc.h
60622@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60623
60624 struct module *owner;
60625
60626- int refcount;
60627+ atomic_t refcount;
60628 };
60629
60630 struct tty_ldisc {
60631diff --git a/include/linux/types.h b/include/linux/types.h
60632index 57a9723..dbe234a 100644
60633--- a/include/linux/types.h
60634+++ b/include/linux/types.h
60635@@ -213,10 +213,26 @@ typedef struct {
60636 int counter;
60637 } atomic_t;
60638
60639+#ifdef CONFIG_PAX_REFCOUNT
60640+typedef struct {
60641+ int counter;
60642+} atomic_unchecked_t;
60643+#else
60644+typedef atomic_t atomic_unchecked_t;
60645+#endif
60646+
60647 #ifdef CONFIG_64BIT
60648 typedef struct {
60649 long counter;
60650 } atomic64_t;
60651+
60652+#ifdef CONFIG_PAX_REFCOUNT
60653+typedef struct {
60654+ long counter;
60655+} atomic64_unchecked_t;
60656+#else
60657+typedef atomic64_t atomic64_unchecked_t;
60658+#endif
60659 #endif
60660
60661 struct list_head {
60662diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
60663index 5ca0951..ab496a5 100644
60664--- a/include/linux/uaccess.h
60665+++ b/include/linux/uaccess.h
60666@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
60667 long ret; \
60668 mm_segment_t old_fs = get_fs(); \
60669 \
60670- set_fs(KERNEL_DS); \
60671 pagefault_disable(); \
60672- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60673- pagefault_enable(); \
60674+ set_fs(KERNEL_DS); \
60675+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60676 set_fs(old_fs); \
60677+ pagefault_enable(); \
60678 ret; \
60679 })
60680
60681diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
60682index 99c1b4d..bb94261 100644
60683--- a/include/linux/unaligned/access_ok.h
60684+++ b/include/linux/unaligned/access_ok.h
60685@@ -6,32 +6,32 @@
60686
60687 static inline u16 get_unaligned_le16(const void *p)
60688 {
60689- return le16_to_cpup((__le16 *)p);
60690+ return le16_to_cpup((const __le16 *)p);
60691 }
60692
60693 static inline u32 get_unaligned_le32(const void *p)
60694 {
60695- return le32_to_cpup((__le32 *)p);
60696+ return le32_to_cpup((const __le32 *)p);
60697 }
60698
60699 static inline u64 get_unaligned_le64(const void *p)
60700 {
60701- return le64_to_cpup((__le64 *)p);
60702+ return le64_to_cpup((const __le64 *)p);
60703 }
60704
60705 static inline u16 get_unaligned_be16(const void *p)
60706 {
60707- return be16_to_cpup((__be16 *)p);
60708+ return be16_to_cpup((const __be16 *)p);
60709 }
60710
60711 static inline u32 get_unaligned_be32(const void *p)
60712 {
60713- return be32_to_cpup((__be32 *)p);
60714+ return be32_to_cpup((const __be32 *)p);
60715 }
60716
60717 static inline u64 get_unaligned_be64(const void *p)
60718 {
60719- return be64_to_cpup((__be64 *)p);
60720+ return be64_to_cpup((const __be64 *)p);
60721 }
60722
60723 static inline void put_unaligned_le16(u16 val, void *p)
60724diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
60725index e5a40c3..20ab0f6 100644
60726--- a/include/linux/usb/renesas_usbhs.h
60727+++ b/include/linux/usb/renesas_usbhs.h
60728@@ -39,7 +39,7 @@ enum {
60729 */
60730 struct renesas_usbhs_driver_callback {
60731 int (*notify_hotplug)(struct platform_device *pdev);
60732-};
60733+} __no_const;
60734
60735 /*
60736 * callback functions for platform
60737@@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
60738 * VBUS control is needed for Host
60739 */
60740 int (*set_vbus)(struct platform_device *pdev, int enable);
60741-};
60742+} __no_const;
60743
60744 /*
60745 * parameters for renesas usbhs
60746diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
60747index 6f8fbcf..8259001 100644
60748--- a/include/linux/vermagic.h
60749+++ b/include/linux/vermagic.h
60750@@ -25,9 +25,35 @@
60751 #define MODULE_ARCH_VERMAGIC ""
60752 #endif
60753
60754+#ifdef CONFIG_PAX_REFCOUNT
60755+#define MODULE_PAX_REFCOUNT "REFCOUNT "
60756+#else
60757+#define MODULE_PAX_REFCOUNT ""
60758+#endif
60759+
60760+#ifdef CONSTIFY_PLUGIN
60761+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60762+#else
60763+#define MODULE_CONSTIFY_PLUGIN ""
60764+#endif
60765+
60766+#ifdef STACKLEAK_PLUGIN
60767+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
60768+#else
60769+#define MODULE_STACKLEAK_PLUGIN ""
60770+#endif
60771+
60772+#ifdef CONFIG_GRKERNSEC
60773+#define MODULE_GRSEC "GRSEC "
60774+#else
60775+#define MODULE_GRSEC ""
60776+#endif
60777+
60778 #define VERMAGIC_STRING \
60779 UTS_RELEASE " " \
60780 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60781 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60782- MODULE_ARCH_VERMAGIC
60783+ MODULE_ARCH_VERMAGIC \
60784+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
60785+ MODULE_GRSEC
60786
60787diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
60788index 4bde182..aec92c1 100644
60789--- a/include/linux/vmalloc.h
60790+++ b/include/linux/vmalloc.h
60791@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
60792 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60793 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60794 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
60795+
60796+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60797+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
60798+#endif
60799+
60800 /* bits [20..32] reserved for arch specific ioremap internals */
60801
60802 /*
60803@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
60804 # endif
60805 #endif
60806
60807+#define vmalloc(x) \
60808+({ \
60809+ void *___retval; \
60810+ intoverflow_t ___x = (intoverflow_t)x; \
60811+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60812+ ___retval = NULL; \
60813+ else \
60814+ ___retval = vmalloc((unsigned long)___x); \
60815+ ___retval; \
60816+})
60817+
60818+#define vzalloc(x) \
60819+({ \
60820+ void *___retval; \
60821+ intoverflow_t ___x = (intoverflow_t)x; \
60822+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60823+ ___retval = NULL; \
60824+ else \
60825+ ___retval = vzalloc((unsigned long)___x); \
60826+ ___retval; \
60827+})
60828+
60829+#define __vmalloc(x, y, z) \
60830+({ \
60831+ void *___retval; \
60832+ intoverflow_t ___x = (intoverflow_t)x; \
60833+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60834+ ___retval = NULL; \
60835+ else \
60836+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60837+ ___retval; \
60838+})
60839+
60840+#define vmalloc_user(x) \
60841+({ \
60842+ void *___retval; \
60843+ intoverflow_t ___x = (intoverflow_t)x; \
60844+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60845+ ___retval = NULL; \
60846+ else \
60847+ ___retval = vmalloc_user((unsigned long)___x); \
60848+ ___retval; \
60849+})
60850+
60851+#define vmalloc_exec(x) \
60852+({ \
60853+ void *___retval; \
60854+ intoverflow_t ___x = (intoverflow_t)x; \
60855+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60856+ ___retval = NULL; \
60857+ else \
60858+ ___retval = vmalloc_exec((unsigned long)___x); \
60859+ ___retval; \
60860+})
60861+
60862+#define vmalloc_node(x, y) \
60863+({ \
60864+ void *___retval; \
60865+ intoverflow_t ___x = (intoverflow_t)x; \
60866+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60867+ ___retval = NULL; \
60868+ else \
60869+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60870+ ___retval; \
60871+})
60872+
60873+#define vzalloc_node(x, y) \
60874+({ \
60875+ void *___retval; \
60876+ intoverflow_t ___x = (intoverflow_t)x; \
60877+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60878+ ___retval = NULL; \
60879+ else \
60880+ ___retval = vzalloc_node((unsigned long)___x, (y));\
60881+ ___retval; \
60882+})
60883+
60884+#define vmalloc_32(x) \
60885+({ \
60886+ void *___retval; \
60887+ intoverflow_t ___x = (intoverflow_t)x; \
60888+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60889+ ___retval = NULL; \
60890+ else \
60891+ ___retval = vmalloc_32((unsigned long)___x); \
60892+ ___retval; \
60893+})
60894+
60895+#define vmalloc_32_user(x) \
60896+({ \
60897+void *___retval; \
60898+ intoverflow_t ___x = (intoverflow_t)x; \
60899+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60900+ ___retval = NULL; \
60901+ else \
60902+ ___retval = vmalloc_32_user((unsigned long)___x);\
60903+ ___retval; \
60904+})
60905+
60906 #endif /* _LINUX_VMALLOC_H */
60907diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
60908index 65efb92..137adbb 100644
60909--- a/include/linux/vmstat.h
60910+++ b/include/linux/vmstat.h
60911@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
60912 /*
60913 * Zone based page accounting with per cpu differentials.
60914 */
60915-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60916+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60917
60918 static inline void zone_page_state_add(long x, struct zone *zone,
60919 enum zone_stat_item item)
60920 {
60921- atomic_long_add(x, &zone->vm_stat[item]);
60922- atomic_long_add(x, &vm_stat[item]);
60923+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60924+ atomic_long_add_unchecked(x, &vm_stat[item]);
60925 }
60926
60927 static inline unsigned long global_page_state(enum zone_stat_item item)
60928 {
60929- long x = atomic_long_read(&vm_stat[item]);
60930+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60931 #ifdef CONFIG_SMP
60932 if (x < 0)
60933 x = 0;
60934@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
60935 static inline unsigned long zone_page_state(struct zone *zone,
60936 enum zone_stat_item item)
60937 {
60938- long x = atomic_long_read(&zone->vm_stat[item]);
60939+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60940 #ifdef CONFIG_SMP
60941 if (x < 0)
60942 x = 0;
60943@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
60944 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60945 enum zone_stat_item item)
60946 {
60947- long x = atomic_long_read(&zone->vm_stat[item]);
60948+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60949
60950 #ifdef CONFIG_SMP
60951 int cpu;
60952@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
60953
60954 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60955 {
60956- atomic_long_inc(&zone->vm_stat[item]);
60957- atomic_long_inc(&vm_stat[item]);
60958+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60959+ atomic_long_inc_unchecked(&vm_stat[item]);
60960 }
60961
60962 static inline void __inc_zone_page_state(struct page *page,
60963@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
60964
60965 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60966 {
60967- atomic_long_dec(&zone->vm_stat[item]);
60968- atomic_long_dec(&vm_stat[item]);
60969+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60970+ atomic_long_dec_unchecked(&vm_stat[item]);
60971 }
60972
60973 static inline void __dec_zone_page_state(struct page *page,
60974diff --git a/include/linux/xattr.h b/include/linux/xattr.h
60975index e5d1220..ef6e406 100644
60976--- a/include/linux/xattr.h
60977+++ b/include/linux/xattr.h
60978@@ -57,6 +57,11 @@
60979 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
60980 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
60981
60982+/* User namespace */
60983+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
60984+#define XATTR_PAX_FLAGS_SUFFIX "flags"
60985+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
60986+
60987 #ifdef __KERNEL__
60988
60989 #include <linux/types.h>
60990diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
60991index 4aeff96..b378cdc 100644
60992--- a/include/media/saa7146_vv.h
60993+++ b/include/media/saa7146_vv.h
60994@@ -163,7 +163,7 @@ struct saa7146_ext_vv
60995 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60996
60997 /* the extension can override this */
60998- struct v4l2_ioctl_ops ops;
60999+ v4l2_ioctl_ops_no_const ops;
61000 /* pointer to the saa7146 core ops */
61001 const struct v4l2_ioctl_ops *core_ops;
61002
61003diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61004index c7c40f1..4f01585 100644
61005--- a/include/media/v4l2-dev.h
61006+++ b/include/media/v4l2-dev.h
61007@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61008
61009
61010 struct v4l2_file_operations {
61011- struct module *owner;
61012+ struct module * const owner;
61013 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61014 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61015 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61016@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61017 int (*open) (struct file *);
61018 int (*release) (struct file *);
61019 };
61020+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61021
61022 /*
61023 * Newer version of video_device, handled by videodev2.c
61024diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61025index 4d1c74a..65e1221 100644
61026--- a/include/media/v4l2-ioctl.h
61027+++ b/include/media/v4l2-ioctl.h
61028@@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61029 long (*vidioc_default) (struct file *file, void *fh,
61030 bool valid_prio, int cmd, void *arg);
61031 };
61032-
61033+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61034
61035 /* v4l debugging and diagnostics */
61036
61037diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61038index 8d55251..dfe5b0a 100644
61039--- a/include/net/caif/caif_hsi.h
61040+++ b/include/net/caif/caif_hsi.h
61041@@ -98,7 +98,7 @@ struct cfhsi_drv {
61042 void (*rx_done_cb) (struct cfhsi_drv *drv);
61043 void (*wake_up_cb) (struct cfhsi_drv *drv);
61044 void (*wake_down_cb) (struct cfhsi_drv *drv);
61045-};
61046+} __no_const;
61047
61048 /* Structure implemented by HSI device. */
61049 struct cfhsi_dev {
61050diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61051index 9e5425b..8136ffc 100644
61052--- a/include/net/caif/cfctrl.h
61053+++ b/include/net/caif/cfctrl.h
61054@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61055 void (*radioset_rsp)(void);
61056 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61057 struct cflayer *client_layer);
61058-};
61059+} __no_const;
61060
61061 /* Link Setup Parameters for CAIF-Links. */
61062 struct cfctrl_link_param {
61063@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61064 struct cfctrl {
61065 struct cfsrvl serv;
61066 struct cfctrl_rsp res;
61067- atomic_t req_seq_no;
61068- atomic_t rsp_seq_no;
61069+ atomic_unchecked_t req_seq_no;
61070+ atomic_unchecked_t rsp_seq_no;
61071 struct list_head list;
61072 /* Protects from simultaneous access to first_req list */
61073 spinlock_t info_list_lock;
61074diff --git a/include/net/flow.h b/include/net/flow.h
61075index 57f15a7..0de26c6 100644
61076--- a/include/net/flow.h
61077+++ b/include/net/flow.h
61078@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61079
61080 extern void flow_cache_flush(void);
61081 extern void flow_cache_flush_deferred(void);
61082-extern atomic_t flow_cache_genid;
61083+extern atomic_unchecked_t flow_cache_genid;
61084
61085 #endif
61086diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61087index e9ff3fc..9d3e5c7 100644
61088--- a/include/net/inetpeer.h
61089+++ b/include/net/inetpeer.h
61090@@ -48,8 +48,8 @@ struct inet_peer {
61091 */
61092 union {
61093 struct {
61094- atomic_t rid; /* Frag reception counter */
61095- atomic_t ip_id_count; /* IP ID for the next packet */
61096+ atomic_unchecked_t rid; /* Frag reception counter */
61097+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61098 __u32 tcp_ts;
61099 __u32 tcp_ts_stamp;
61100 };
61101@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61102 more++;
61103 inet_peer_refcheck(p);
61104 do {
61105- old = atomic_read(&p->ip_id_count);
61106+ old = atomic_read_unchecked(&p->ip_id_count);
61107 new = old + more;
61108 if (!new)
61109 new = 1;
61110- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61111+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61112 return new;
61113 }
61114
61115diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61116index 10422ef..662570f 100644
61117--- a/include/net/ip_fib.h
61118+++ b/include/net/ip_fib.h
61119@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61120
61121 #define FIB_RES_SADDR(net, res) \
61122 ((FIB_RES_NH(res).nh_saddr_genid == \
61123- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61124+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61125 FIB_RES_NH(res).nh_saddr : \
61126 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61127 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61128diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61129index e5a7b9a..f4fc44b 100644
61130--- a/include/net/ip_vs.h
61131+++ b/include/net/ip_vs.h
61132@@ -509,7 +509,7 @@ struct ip_vs_conn {
61133 struct ip_vs_conn *control; /* Master control connection */
61134 atomic_t n_control; /* Number of controlled ones */
61135 struct ip_vs_dest *dest; /* real server */
61136- atomic_t in_pkts; /* incoming packet counter */
61137+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61138
61139 /* packet transmitter for different forwarding methods. If it
61140 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61141@@ -647,7 +647,7 @@ struct ip_vs_dest {
61142 __be16 port; /* port number of the server */
61143 union nf_inet_addr addr; /* IP address of the server */
61144 volatile unsigned flags; /* dest status flags */
61145- atomic_t conn_flags; /* flags to copy to conn */
61146+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61147 atomic_t weight; /* server weight */
61148
61149 atomic_t refcnt; /* reference counter */
61150diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61151index 69b610a..fe3962c 100644
61152--- a/include/net/irda/ircomm_core.h
61153+++ b/include/net/irda/ircomm_core.h
61154@@ -51,7 +51,7 @@ typedef struct {
61155 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61156 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61157 struct ircomm_info *);
61158-} call_t;
61159+} __no_const call_t;
61160
61161 struct ircomm_cb {
61162 irda_queue_t queue;
61163diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61164index 59ba38bc..d515662 100644
61165--- a/include/net/irda/ircomm_tty.h
61166+++ b/include/net/irda/ircomm_tty.h
61167@@ -35,6 +35,7 @@
61168 #include <linux/termios.h>
61169 #include <linux/timer.h>
61170 #include <linux/tty.h> /* struct tty_struct */
61171+#include <asm/local.h>
61172
61173 #include <net/irda/irias_object.h>
61174 #include <net/irda/ircomm_core.h>
61175@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61176 unsigned short close_delay;
61177 unsigned short closing_wait; /* time to wait before closing */
61178
61179- int open_count;
61180- int blocked_open; /* # of blocked opens */
61181+ local_t open_count;
61182+ local_t blocked_open; /* # of blocked opens */
61183
61184 /* Protect concurent access to :
61185 * o self->open_count
61186diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61187index f2419cf..473679f 100644
61188--- a/include/net/iucv/af_iucv.h
61189+++ b/include/net/iucv/af_iucv.h
61190@@ -139,7 +139,7 @@ struct iucv_sock {
61191 struct iucv_sock_list {
61192 struct hlist_head head;
61193 rwlock_t lock;
61194- atomic_t autobind_name;
61195+ atomic_unchecked_t autobind_name;
61196 };
61197
61198 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61199diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61200index 2720884..3aa5c25 100644
61201--- a/include/net/neighbour.h
61202+++ b/include/net/neighbour.h
61203@@ -122,7 +122,7 @@ struct neigh_ops {
61204 void (*error_report)(struct neighbour *, struct sk_buff *);
61205 int (*output)(struct neighbour *, struct sk_buff *);
61206 int (*connected_output)(struct neighbour *, struct sk_buff *);
61207-};
61208+} __do_const;
61209
61210 struct pneigh_entry {
61211 struct pneigh_entry *next;
61212diff --git a/include/net/netlink.h b/include/net/netlink.h
61213index cb1f350..3279d2c 100644
61214--- a/include/net/netlink.h
61215+++ b/include/net/netlink.h
61216@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61217 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61218 {
61219 if (mark)
61220- skb_trim(skb, (unsigned char *) mark - skb->data);
61221+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61222 }
61223
61224 /**
61225diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61226index d786b4f..4c3dd41 100644
61227--- a/include/net/netns/ipv4.h
61228+++ b/include/net/netns/ipv4.h
61229@@ -56,8 +56,8 @@ struct netns_ipv4 {
61230
61231 unsigned int sysctl_ping_group_range[2];
61232
61233- atomic_t rt_genid;
61234- atomic_t dev_addr_genid;
61235+ atomic_unchecked_t rt_genid;
61236+ atomic_unchecked_t dev_addr_genid;
61237
61238 #ifdef CONFIG_IP_MROUTE
61239 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61240diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61241index 6a72a58..e6a127d 100644
61242--- a/include/net/sctp/sctp.h
61243+++ b/include/net/sctp/sctp.h
61244@@ -318,9 +318,9 @@ do { \
61245
61246 #else /* SCTP_DEBUG */
61247
61248-#define SCTP_DEBUG_PRINTK(whatever...)
61249-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61250-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61251+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61252+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61253+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61254 #define SCTP_ENABLE_DEBUG
61255 #define SCTP_DISABLE_DEBUG
61256 #define SCTP_ASSERT(expr, str, func)
61257diff --git a/include/net/sock.h b/include/net/sock.h
61258index 32e3937..87a1dbc 100644
61259--- a/include/net/sock.h
61260+++ b/include/net/sock.h
61261@@ -277,7 +277,7 @@ struct sock {
61262 #ifdef CONFIG_RPS
61263 __u32 sk_rxhash;
61264 #endif
61265- atomic_t sk_drops;
61266+ atomic_unchecked_t sk_drops;
61267 int sk_rcvbuf;
61268
61269 struct sk_filter __rcu *sk_filter;
61270@@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61271 }
61272
61273 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61274- char __user *from, char *to,
61275+ char __user *from, unsigned char *to,
61276 int copy, int offset)
61277 {
61278 if (skb->ip_summed == CHECKSUM_NONE) {
61279diff --git a/include/net/tcp.h b/include/net/tcp.h
61280index bb18c4d..bb87972 100644
61281--- a/include/net/tcp.h
61282+++ b/include/net/tcp.h
61283@@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61284 char *name;
61285 sa_family_t family;
61286 const struct file_operations *seq_fops;
61287- struct seq_operations seq_ops;
61288+ seq_operations_no_const seq_ops;
61289 };
61290
61291 struct tcp_iter_state {
61292diff --git a/include/net/udp.h b/include/net/udp.h
61293index 3b285f4..0219639 100644
61294--- a/include/net/udp.h
61295+++ b/include/net/udp.h
61296@@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61297 sa_family_t family;
61298 struct udp_table *udp_table;
61299 const struct file_operations *seq_fops;
61300- struct seq_operations seq_ops;
61301+ seq_operations_no_const seq_ops;
61302 };
61303
61304 struct udp_iter_state {
61305diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61306index b203e14..1df3991 100644
61307--- a/include/net/xfrm.h
61308+++ b/include/net/xfrm.h
61309@@ -505,7 +505,7 @@ struct xfrm_policy {
61310 struct timer_list timer;
61311
61312 struct flow_cache_object flo;
61313- atomic_t genid;
61314+ atomic_unchecked_t genid;
61315 u32 priority;
61316 u32 index;
61317 struct xfrm_mark mark;
61318diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61319index 1a046b1..ee0bef0 100644
61320--- a/include/rdma/iw_cm.h
61321+++ b/include/rdma/iw_cm.h
61322@@ -122,7 +122,7 @@ struct iw_cm_verbs {
61323 int backlog);
61324
61325 int (*destroy_listen)(struct iw_cm_id *cm_id);
61326-};
61327+} __no_const;
61328
61329 /**
61330 * iw_create_cm_id - Create an IW CM identifier.
61331diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61332index 5d1a758..1dbf795 100644
61333--- a/include/scsi/libfc.h
61334+++ b/include/scsi/libfc.h
61335@@ -748,6 +748,7 @@ struct libfc_function_template {
61336 */
61337 void (*disc_stop_final) (struct fc_lport *);
61338 };
61339+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61340
61341 /**
61342 * struct fc_disc - Discovery context
61343@@ -851,7 +852,7 @@ struct fc_lport {
61344 struct fc_vport *vport;
61345
61346 /* Operational Information */
61347- struct libfc_function_template tt;
61348+ libfc_function_template_no_const tt;
61349 u8 link_up;
61350 u8 qfull;
61351 enum fc_lport_state state;
61352diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61353index 5591ed5..13eb457 100644
61354--- a/include/scsi/scsi_device.h
61355+++ b/include/scsi/scsi_device.h
61356@@ -161,9 +161,9 @@ struct scsi_device {
61357 unsigned int max_device_blocked; /* what device_blocked counts down from */
61358 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61359
61360- atomic_t iorequest_cnt;
61361- atomic_t iodone_cnt;
61362- atomic_t ioerr_cnt;
61363+ atomic_unchecked_t iorequest_cnt;
61364+ atomic_unchecked_t iodone_cnt;
61365+ atomic_unchecked_t ioerr_cnt;
61366
61367 struct device sdev_gendev,
61368 sdev_dev;
61369diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61370index 2a65167..91e01f8 100644
61371--- a/include/scsi/scsi_transport_fc.h
61372+++ b/include/scsi/scsi_transport_fc.h
61373@@ -711,7 +711,7 @@ struct fc_function_template {
61374 unsigned long show_host_system_hostname:1;
61375
61376 unsigned long disable_target_scan:1;
61377-};
61378+} __do_const;
61379
61380
61381 /**
61382diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61383index 030b87c..98a6954 100644
61384--- a/include/sound/ak4xxx-adda.h
61385+++ b/include/sound/ak4xxx-adda.h
61386@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61387 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61388 unsigned char val);
61389 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61390-};
61391+} __no_const;
61392
61393 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61394
61395diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61396index 8c05e47..2b5df97 100644
61397--- a/include/sound/hwdep.h
61398+++ b/include/sound/hwdep.h
61399@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61400 struct snd_hwdep_dsp_status *status);
61401 int (*dsp_load)(struct snd_hwdep *hw,
61402 struct snd_hwdep_dsp_image *image);
61403-};
61404+} __no_const;
61405
61406 struct snd_hwdep {
61407 struct snd_card *card;
61408diff --git a/include/sound/info.h b/include/sound/info.h
61409index 5492cc4..1a65278 100644
61410--- a/include/sound/info.h
61411+++ b/include/sound/info.h
61412@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61413 struct snd_info_buffer *buffer);
61414 void (*write)(struct snd_info_entry *entry,
61415 struct snd_info_buffer *buffer);
61416-};
61417+} __no_const;
61418
61419 struct snd_info_entry_ops {
61420 int (*open)(struct snd_info_entry *entry,
61421diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61422index 0cf91b2..b70cae4 100644
61423--- a/include/sound/pcm.h
61424+++ b/include/sound/pcm.h
61425@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61426 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61427 int (*ack)(struct snd_pcm_substream *substream);
61428 };
61429+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61430
61431 /*
61432 *
61433diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61434index af1b49e..a5d55a5 100644
61435--- a/include/sound/sb16_csp.h
61436+++ b/include/sound/sb16_csp.h
61437@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61438 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61439 int (*csp_stop) (struct snd_sb_csp * p);
61440 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61441-};
61442+} __no_const;
61443
61444 /*
61445 * CSP private data
61446diff --git a/include/sound/soc.h b/include/sound/soc.h
61447index 11cfb59..e3f93f4 100644
61448--- a/include/sound/soc.h
61449+++ b/include/sound/soc.h
61450@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61451 /* platform IO - used for platform DAPM */
61452 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61453 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61454-};
61455+} __do_const;
61456
61457 struct snd_soc_platform {
61458 const char *name;
61459diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61460index 444cd6b..3327cc5 100644
61461--- a/include/sound/ymfpci.h
61462+++ b/include/sound/ymfpci.h
61463@@ -358,7 +358,7 @@ struct snd_ymfpci {
61464 spinlock_t reg_lock;
61465 spinlock_t voice_lock;
61466 wait_queue_head_t interrupt_sleep;
61467- atomic_t interrupt_sleep_count;
61468+ atomic_unchecked_t interrupt_sleep_count;
61469 struct snd_info_entry *proc_entry;
61470 const struct firmware *dsp_microcode;
61471 const struct firmware *controller_microcode;
61472diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61473index a79886c..b483af6 100644
61474--- a/include/target/target_core_base.h
61475+++ b/include/target/target_core_base.h
61476@@ -346,7 +346,7 @@ struct t10_reservation_ops {
61477 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61478 int (*t10_pr_register)(struct se_cmd *);
61479 int (*t10_pr_clear)(struct se_cmd *);
61480-};
61481+} __no_const;
61482
61483 struct t10_reservation {
61484 /* Reservation effects all target ports */
61485@@ -465,8 +465,8 @@ struct se_cmd {
61486 atomic_t t_se_count;
61487 atomic_t t_task_cdbs_left;
61488 atomic_t t_task_cdbs_ex_left;
61489- atomic_t t_task_cdbs_sent;
61490- atomic_t t_transport_aborted;
61491+ atomic_unchecked_t t_task_cdbs_sent;
61492+ atomic_unchecked_t t_transport_aborted;
61493 atomic_t t_transport_active;
61494 atomic_t t_transport_complete;
61495 atomic_t t_transport_queue_active;
61496@@ -704,7 +704,7 @@ struct se_device {
61497 /* Active commands on this virtual SE device */
61498 atomic_t simple_cmds;
61499 atomic_t depth_left;
61500- atomic_t dev_ordered_id;
61501+ atomic_unchecked_t dev_ordered_id;
61502 atomic_t execute_tasks;
61503 atomic_t dev_ordered_sync;
61504 atomic_t dev_qf_count;
61505diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61506index 1c09820..7f5ec79 100644
61507--- a/include/trace/events/irq.h
61508+++ b/include/trace/events/irq.h
61509@@ -36,7 +36,7 @@ struct softirq_action;
61510 */
61511 TRACE_EVENT(irq_handler_entry,
61512
61513- TP_PROTO(int irq, struct irqaction *action),
61514+ TP_PROTO(int irq, const struct irqaction *action),
61515
61516 TP_ARGS(irq, action),
61517
61518@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61519 */
61520 TRACE_EVENT(irq_handler_exit,
61521
61522- TP_PROTO(int irq, struct irqaction *action, int ret),
61523+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61524
61525 TP_ARGS(irq, action, ret),
61526
61527diff --git a/include/video/udlfb.h b/include/video/udlfb.h
61528index c41f308..6918de3 100644
61529--- a/include/video/udlfb.h
61530+++ b/include/video/udlfb.h
61531@@ -52,10 +52,10 @@ struct dlfb_data {
61532 u32 pseudo_palette[256];
61533 int blank_mode; /*one of FB_BLANK_ */
61534 /* blit-only rendering path metrics, exposed through sysfs */
61535- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61536- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61537- atomic_t bytes_sent; /* to usb, after compression including overhead */
61538- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61539+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61540+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61541+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61542+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61543 };
61544
61545 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61546diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
61547index 0993a22..32ba2fe 100644
61548--- a/include/video/uvesafb.h
61549+++ b/include/video/uvesafb.h
61550@@ -177,6 +177,7 @@ struct uvesafb_par {
61551 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61552 u8 pmi_setpal; /* PMI for palette changes */
61553 u16 *pmi_base; /* protected mode interface location */
61554+ u8 *pmi_code; /* protected mode code location */
61555 void *pmi_start;
61556 void *pmi_pal;
61557 u8 *vbe_state_orig; /*
61558diff --git a/init/Kconfig b/init/Kconfig
61559index 43298f9..2f56c12 100644
61560--- a/init/Kconfig
61561+++ b/init/Kconfig
61562@@ -1214,7 +1214,7 @@ config SLUB_DEBUG
61563
61564 config COMPAT_BRK
61565 bool "Disable heap randomization"
61566- default y
61567+ default n
61568 help
61569 Randomizing heap placement makes heap exploits harder, but it
61570 also breaks ancient binaries (including anything libc5 based).
61571diff --git a/init/do_mounts.c b/init/do_mounts.c
61572index db6e5ee..7677ff7 100644
61573--- a/init/do_mounts.c
61574+++ b/init/do_mounts.c
61575@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
61576
61577 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61578 {
61579- int err = sys_mount(name, "/root", fs, flags, data);
61580+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61581 if (err)
61582 return err;
61583
61584- sys_chdir((const char __user __force *)"/root");
61585+ sys_chdir((const char __force_user*)"/root");
61586 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61587 printk(KERN_INFO
61588 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61589@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
61590 va_start(args, fmt);
61591 vsprintf(buf, fmt, args);
61592 va_end(args);
61593- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61594+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61595 if (fd >= 0) {
61596 sys_ioctl(fd, FDEJECT, 0);
61597 sys_close(fd);
61598 }
61599 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61600- fd = sys_open("/dev/console", O_RDWR, 0);
61601+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61602 if (fd >= 0) {
61603 sys_ioctl(fd, TCGETS, (long)&termios);
61604 termios.c_lflag &= ~ICANON;
61605 sys_ioctl(fd, TCSETSF, (long)&termios);
61606- sys_read(fd, &c, 1);
61607+ sys_read(fd, (char __user *)&c, 1);
61608 termios.c_lflag |= ICANON;
61609 sys_ioctl(fd, TCSETSF, (long)&termios);
61610 sys_close(fd);
61611@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
61612 mount_root();
61613 out:
61614 devtmpfs_mount("dev");
61615- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61616- sys_chroot((const char __user __force *)".");
61617+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61618+ sys_chroot((const char __force_user *)".");
61619 }
61620diff --git a/init/do_mounts.h b/init/do_mounts.h
61621index f5b978a..69dbfe8 100644
61622--- a/init/do_mounts.h
61623+++ b/init/do_mounts.h
61624@@ -15,15 +15,15 @@ extern int root_mountflags;
61625
61626 static inline int create_dev(char *name, dev_t dev)
61627 {
61628- sys_unlink(name);
61629- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61630+ sys_unlink((char __force_user *)name);
61631+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61632 }
61633
61634 #if BITS_PER_LONG == 32
61635 static inline u32 bstat(char *name)
61636 {
61637 struct stat64 stat;
61638- if (sys_stat64(name, &stat) != 0)
61639+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61640 return 0;
61641 if (!S_ISBLK(stat.st_mode))
61642 return 0;
61643@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61644 static inline u32 bstat(char *name)
61645 {
61646 struct stat stat;
61647- if (sys_newstat(name, &stat) != 0)
61648+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61649 return 0;
61650 if (!S_ISBLK(stat.st_mode))
61651 return 0;
61652diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
61653index 3098a38..253064e 100644
61654--- a/init/do_mounts_initrd.c
61655+++ b/init/do_mounts_initrd.c
61656@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61657 create_dev("/dev/root.old", Root_RAM0);
61658 /* mount initrd on rootfs' /root */
61659 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61660- sys_mkdir("/old", 0700);
61661- root_fd = sys_open("/", 0, 0);
61662- old_fd = sys_open("/old", 0, 0);
61663+ sys_mkdir((const char __force_user *)"/old", 0700);
61664+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61665+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61666 /* move initrd over / and chdir/chroot in initrd root */
61667- sys_chdir("/root");
61668- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61669- sys_chroot(".");
61670+ sys_chdir((const char __force_user *)"/root");
61671+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61672+ sys_chroot((const char __force_user *)".");
61673
61674 /*
61675 * In case that a resume from disk is carried out by linuxrc or one of
61676@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61677
61678 /* move initrd to rootfs' /old */
61679 sys_fchdir(old_fd);
61680- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61681+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61682 /* switch root and cwd back to / of rootfs */
61683 sys_fchdir(root_fd);
61684- sys_chroot(".");
61685+ sys_chroot((const char __force_user *)".");
61686 sys_close(old_fd);
61687 sys_close(root_fd);
61688
61689 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61690- sys_chdir("/old");
61691+ sys_chdir((const char __force_user *)"/old");
61692 return;
61693 }
61694
61695@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61696 mount_root();
61697
61698 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61699- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61700+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61701 if (!error)
61702 printk("okay\n");
61703 else {
61704- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61705+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61706 if (error == -ENOENT)
61707 printk("/initrd does not exist. Ignored.\n");
61708 else
61709 printk("failed\n");
61710 printk(KERN_NOTICE "Unmounting old root\n");
61711- sys_umount("/old", MNT_DETACH);
61712+ sys_umount((char __force_user *)"/old", MNT_DETACH);
61713 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61714 if (fd < 0) {
61715 error = fd;
61716@@ -116,11 +116,11 @@ int __init initrd_load(void)
61717 * mounted in the normal path.
61718 */
61719 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61720- sys_unlink("/initrd.image");
61721+ sys_unlink((const char __force_user *)"/initrd.image");
61722 handle_initrd();
61723 return 1;
61724 }
61725 }
61726- sys_unlink("/initrd.image");
61727+ sys_unlink((const char __force_user *)"/initrd.image");
61728 return 0;
61729 }
61730diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
61731index 32c4799..c27ee74 100644
61732--- a/init/do_mounts_md.c
61733+++ b/init/do_mounts_md.c
61734@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61735 partitioned ? "_d" : "", minor,
61736 md_setup_args[ent].device_names);
61737
61738- fd = sys_open(name, 0, 0);
61739+ fd = sys_open((char __force_user *)name, 0, 0);
61740 if (fd < 0) {
61741 printk(KERN_ERR "md: open failed - cannot start "
61742 "array %s\n", name);
61743@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61744 * array without it
61745 */
61746 sys_close(fd);
61747- fd = sys_open(name, 0, 0);
61748+ fd = sys_open((char __force_user *)name, 0, 0);
61749 sys_ioctl(fd, BLKRRPART, 0);
61750 }
61751 sys_close(fd);
61752@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61753
61754 wait_for_device_probe();
61755
61756- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61757+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61758 if (fd >= 0) {
61759 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61760 sys_close(fd);
61761diff --git a/init/initramfs.c b/init/initramfs.c
61762index 2531811..040d4d4 100644
61763--- a/init/initramfs.c
61764+++ b/init/initramfs.c
61765@@ -74,7 +74,7 @@ static void __init free_hash(void)
61766 }
61767 }
61768
61769-static long __init do_utime(char __user *filename, time_t mtime)
61770+static long __init do_utime(__force char __user *filename, time_t mtime)
61771 {
61772 struct timespec t[2];
61773
61774@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61775 struct dir_entry *de, *tmp;
61776 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61777 list_del(&de->list);
61778- do_utime(de->name, de->mtime);
61779+ do_utime((char __force_user *)de->name, de->mtime);
61780 kfree(de->name);
61781 kfree(de);
61782 }
61783@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61784 if (nlink >= 2) {
61785 char *old = find_link(major, minor, ino, mode, collected);
61786 if (old)
61787- return (sys_link(old, collected) < 0) ? -1 : 1;
61788+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61789 }
61790 return 0;
61791 }
61792@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
61793 {
61794 struct stat st;
61795
61796- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61797+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61798 if (S_ISDIR(st.st_mode))
61799- sys_rmdir(path);
61800+ sys_rmdir((char __force_user *)path);
61801 else
61802- sys_unlink(path);
61803+ sys_unlink((char __force_user *)path);
61804 }
61805 }
61806
61807@@ -305,7 +305,7 @@ static int __init do_name(void)
61808 int openflags = O_WRONLY|O_CREAT;
61809 if (ml != 1)
61810 openflags |= O_TRUNC;
61811- wfd = sys_open(collected, openflags, mode);
61812+ wfd = sys_open((char __force_user *)collected, openflags, mode);
61813
61814 if (wfd >= 0) {
61815 sys_fchown(wfd, uid, gid);
61816@@ -317,17 +317,17 @@ static int __init do_name(void)
61817 }
61818 }
61819 } else if (S_ISDIR(mode)) {
61820- sys_mkdir(collected, mode);
61821- sys_chown(collected, uid, gid);
61822- sys_chmod(collected, mode);
61823+ sys_mkdir((char __force_user *)collected, mode);
61824+ sys_chown((char __force_user *)collected, uid, gid);
61825+ sys_chmod((char __force_user *)collected, mode);
61826 dir_add(collected, mtime);
61827 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61828 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61829 if (maybe_link() == 0) {
61830- sys_mknod(collected, mode, rdev);
61831- sys_chown(collected, uid, gid);
61832- sys_chmod(collected, mode);
61833- do_utime(collected, mtime);
61834+ sys_mknod((char __force_user *)collected, mode, rdev);
61835+ sys_chown((char __force_user *)collected, uid, gid);
61836+ sys_chmod((char __force_user *)collected, mode);
61837+ do_utime((char __force_user *)collected, mtime);
61838 }
61839 }
61840 return 0;
61841@@ -336,15 +336,15 @@ static int __init do_name(void)
61842 static int __init do_copy(void)
61843 {
61844 if (count >= body_len) {
61845- sys_write(wfd, victim, body_len);
61846+ sys_write(wfd, (char __force_user *)victim, body_len);
61847 sys_close(wfd);
61848- do_utime(vcollected, mtime);
61849+ do_utime((char __force_user *)vcollected, mtime);
61850 kfree(vcollected);
61851 eat(body_len);
61852 state = SkipIt;
61853 return 0;
61854 } else {
61855- sys_write(wfd, victim, count);
61856+ sys_write(wfd, (char __force_user *)victim, count);
61857 body_len -= count;
61858 eat(count);
61859 return 1;
61860@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61861 {
61862 collected[N_ALIGN(name_len) + body_len] = '\0';
61863 clean_path(collected, 0);
61864- sys_symlink(collected + N_ALIGN(name_len), collected);
61865- sys_lchown(collected, uid, gid);
61866- do_utime(collected, mtime);
61867+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61868+ sys_lchown((char __force_user *)collected, uid, gid);
61869+ do_utime((char __force_user *)collected, mtime);
61870 state = SkipIt;
61871 next_state = Reset;
61872 return 0;
61873diff --git a/init/main.c b/init/main.c
61874index 217ed23..32e5731 100644
61875--- a/init/main.c
61876+++ b/init/main.c
61877@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
61878 extern void tc_init(void);
61879 #endif
61880
61881+extern void grsecurity_init(void);
61882+
61883 /*
61884 * Debug helper: via this flag we know that we are in 'early bootup code'
61885 * where only the boot processor is running with IRQ disabled. This means
61886@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
61887
61888 __setup("reset_devices", set_reset_devices);
61889
61890+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61891+extern char pax_enter_kernel_user[];
61892+extern char pax_exit_kernel_user[];
61893+extern pgdval_t clone_pgd_mask;
61894+#endif
61895+
61896+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61897+static int __init setup_pax_nouderef(char *str)
61898+{
61899+#ifdef CONFIG_X86_32
61900+ unsigned int cpu;
61901+ struct desc_struct *gdt;
61902+
61903+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61904+ gdt = get_cpu_gdt_table(cpu);
61905+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61906+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61907+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61908+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61909+ }
61910+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61911+#else
61912+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61913+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61914+ clone_pgd_mask = ~(pgdval_t)0UL;
61915+#endif
61916+
61917+ return 0;
61918+}
61919+early_param("pax_nouderef", setup_pax_nouderef);
61920+#endif
61921+
61922+#ifdef CONFIG_PAX_SOFTMODE
61923+int pax_softmode;
61924+
61925+static int __init setup_pax_softmode(char *str)
61926+{
61927+ get_option(&str, &pax_softmode);
61928+ return 1;
61929+}
61930+__setup("pax_softmode=", setup_pax_softmode);
61931+#endif
61932+
61933 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61934 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61935 static const char *panic_later, *panic_param;
61936@@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
61937 {
61938 int count = preempt_count();
61939 int ret;
61940+ const char *msg1 = "", *msg2 = "";
61941
61942 if (initcall_debug)
61943 ret = do_one_initcall_debug(fn);
61944@@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
61945 sprintf(msgbuf, "error code %d ", ret);
61946
61947 if (preempt_count() != count) {
61948- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61949+ msg1 = " preemption imbalance";
61950 preempt_count() = count;
61951 }
61952 if (irqs_disabled()) {
61953- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61954+ msg2 = " disabled interrupts";
61955 local_irq_enable();
61956 }
61957- if (msgbuf[0]) {
61958- printk("initcall %pF returned with %s\n", fn, msgbuf);
61959+ if (msgbuf[0] || *msg1 || *msg2) {
61960+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61961 }
61962
61963 return ret;
61964@@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
61965 do_basic_setup();
61966
61967 /* Open the /dev/console on the rootfs, this should never fail */
61968- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
61969+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
61970 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
61971
61972 (void) sys_dup(0);
61973@@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
61974 if (!ramdisk_execute_command)
61975 ramdisk_execute_command = "/init";
61976
61977- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61978+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
61979 ramdisk_execute_command = NULL;
61980 prepare_namespace();
61981 }
61982
61983+ grsecurity_init();
61984+
61985 /*
61986 * Ok, we have completed the initial bootup, and
61987 * we're essentially up and running. Get rid of the
61988diff --git a/ipc/mqueue.c b/ipc/mqueue.c
61989index 5b4293d..f179875 100644
61990--- a/ipc/mqueue.c
61991+++ b/ipc/mqueue.c
61992@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
61993 mq_bytes = (mq_msg_tblsz +
61994 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61995
61996+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61997 spin_lock(&mq_lock);
61998 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61999 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62000diff --git a/ipc/msg.c b/ipc/msg.c
62001index 7385de2..a8180e0 100644
62002--- a/ipc/msg.c
62003+++ b/ipc/msg.c
62004@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62005 return security_msg_queue_associate(msq, msgflg);
62006 }
62007
62008+static struct ipc_ops msg_ops = {
62009+ .getnew = newque,
62010+ .associate = msg_security,
62011+ .more_checks = NULL
62012+};
62013+
62014 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62015 {
62016 struct ipc_namespace *ns;
62017- struct ipc_ops msg_ops;
62018 struct ipc_params msg_params;
62019
62020 ns = current->nsproxy->ipc_ns;
62021
62022- msg_ops.getnew = newque;
62023- msg_ops.associate = msg_security;
62024- msg_ops.more_checks = NULL;
62025-
62026 msg_params.key = key;
62027 msg_params.flg = msgflg;
62028
62029diff --git a/ipc/sem.c b/ipc/sem.c
62030index 5215a81..cfc0cac 100644
62031--- a/ipc/sem.c
62032+++ b/ipc/sem.c
62033@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62034 return 0;
62035 }
62036
62037+static struct ipc_ops sem_ops = {
62038+ .getnew = newary,
62039+ .associate = sem_security,
62040+ .more_checks = sem_more_checks
62041+};
62042+
62043 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62044 {
62045 struct ipc_namespace *ns;
62046- struct ipc_ops sem_ops;
62047 struct ipc_params sem_params;
62048
62049 ns = current->nsproxy->ipc_ns;
62050@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62051 if (nsems < 0 || nsems > ns->sc_semmsl)
62052 return -EINVAL;
62053
62054- sem_ops.getnew = newary;
62055- sem_ops.associate = sem_security;
62056- sem_ops.more_checks = sem_more_checks;
62057-
62058 sem_params.key = key;
62059 sem_params.flg = semflg;
62060 sem_params.u.nsems = nsems;
62061diff --git a/ipc/shm.c b/ipc/shm.c
62062index b76be5b..859e750 100644
62063--- a/ipc/shm.c
62064+++ b/ipc/shm.c
62065@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62066 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62067 #endif
62068
62069+#ifdef CONFIG_GRKERNSEC
62070+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62071+ const time_t shm_createtime, const uid_t cuid,
62072+ const int shmid);
62073+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62074+ const time_t shm_createtime);
62075+#endif
62076+
62077 void shm_init_ns(struct ipc_namespace *ns)
62078 {
62079 ns->shm_ctlmax = SHMMAX;
62080@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62081 shp->shm_lprid = 0;
62082 shp->shm_atim = shp->shm_dtim = 0;
62083 shp->shm_ctim = get_seconds();
62084+#ifdef CONFIG_GRKERNSEC
62085+ {
62086+ struct timespec timeval;
62087+ do_posix_clock_monotonic_gettime(&timeval);
62088+
62089+ shp->shm_createtime = timeval.tv_sec;
62090+ }
62091+#endif
62092 shp->shm_segsz = size;
62093 shp->shm_nattch = 0;
62094 shp->shm_file = file;
62095@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62096 return 0;
62097 }
62098
62099+static struct ipc_ops shm_ops = {
62100+ .getnew = newseg,
62101+ .associate = shm_security,
62102+ .more_checks = shm_more_checks
62103+};
62104+
62105 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62106 {
62107 struct ipc_namespace *ns;
62108- struct ipc_ops shm_ops;
62109 struct ipc_params shm_params;
62110
62111 ns = current->nsproxy->ipc_ns;
62112
62113- shm_ops.getnew = newseg;
62114- shm_ops.associate = shm_security;
62115- shm_ops.more_checks = shm_more_checks;
62116-
62117 shm_params.key = key;
62118 shm_params.flg = shmflg;
62119 shm_params.u.size = size;
62120@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62121 f_mode = FMODE_READ | FMODE_WRITE;
62122 }
62123 if (shmflg & SHM_EXEC) {
62124+
62125+#ifdef CONFIG_PAX_MPROTECT
62126+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
62127+ goto out;
62128+#endif
62129+
62130 prot |= PROT_EXEC;
62131 acc_mode |= S_IXUGO;
62132 }
62133@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62134 if (err)
62135 goto out_unlock;
62136
62137+#ifdef CONFIG_GRKERNSEC
62138+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62139+ shp->shm_perm.cuid, shmid) ||
62140+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62141+ err = -EACCES;
62142+ goto out_unlock;
62143+ }
62144+#endif
62145+
62146 path = shp->shm_file->f_path;
62147 path_get(&path);
62148 shp->shm_nattch++;
62149+#ifdef CONFIG_GRKERNSEC
62150+ shp->shm_lapid = current->pid;
62151+#endif
62152 size = i_size_read(path.dentry->d_inode);
62153 shm_unlock(shp);
62154
62155diff --git a/kernel/acct.c b/kernel/acct.c
62156index fa7eb3d..7faf116 100644
62157--- a/kernel/acct.c
62158+++ b/kernel/acct.c
62159@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62160 */
62161 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62162 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62163- file->f_op->write(file, (char *)&ac,
62164+ file->f_op->write(file, (char __force_user *)&ac,
62165 sizeof(acct_t), &file->f_pos);
62166 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62167 set_fs(fs);
62168diff --git a/kernel/audit.c b/kernel/audit.c
62169index 09fae26..ed71d5b 100644
62170--- a/kernel/audit.c
62171+++ b/kernel/audit.c
62172@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62173 3) suppressed due to audit_rate_limit
62174 4) suppressed due to audit_backlog_limit
62175 */
62176-static atomic_t audit_lost = ATOMIC_INIT(0);
62177+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62178
62179 /* The netlink socket. */
62180 static struct sock *audit_sock;
62181@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62182 unsigned long now;
62183 int print;
62184
62185- atomic_inc(&audit_lost);
62186+ atomic_inc_unchecked(&audit_lost);
62187
62188 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62189
62190@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62191 printk(KERN_WARNING
62192 "audit: audit_lost=%d audit_rate_limit=%d "
62193 "audit_backlog_limit=%d\n",
62194- atomic_read(&audit_lost),
62195+ atomic_read_unchecked(&audit_lost),
62196 audit_rate_limit,
62197 audit_backlog_limit);
62198 audit_panic(message);
62199@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62200 status_set.pid = audit_pid;
62201 status_set.rate_limit = audit_rate_limit;
62202 status_set.backlog_limit = audit_backlog_limit;
62203- status_set.lost = atomic_read(&audit_lost);
62204+ status_set.lost = atomic_read_unchecked(&audit_lost);
62205 status_set.backlog = skb_queue_len(&audit_skb_queue);
62206 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62207 &status_set, sizeof(status_set));
62208@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62209 avail = audit_expand(ab,
62210 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62211 if (!avail)
62212- goto out;
62213+ goto out_va_end;
62214 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62215 }
62216- va_end(args2);
62217 if (len > 0)
62218 skb_put(skb, len);
62219+out_va_end:
62220+ va_end(args2);
62221 out:
62222 return;
62223 }
62224diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62225index 47b7fc1..c003c33 100644
62226--- a/kernel/auditsc.c
62227+++ b/kernel/auditsc.c
62228@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62229 struct audit_buffer **ab,
62230 struct audit_aux_data_execve *axi)
62231 {
62232- int i;
62233- size_t len, len_sent = 0;
62234+ int i, len;
62235+ size_t len_sent = 0;
62236 const char __user *p;
62237 char *buf;
62238
62239@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62240 }
62241
62242 /* global counter which is incremented every time something logs in */
62243-static atomic_t session_id = ATOMIC_INIT(0);
62244+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62245
62246 /**
62247 * audit_set_loginuid - set a task's audit_context loginuid
62248@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62249 */
62250 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62251 {
62252- unsigned int sessionid = atomic_inc_return(&session_id);
62253+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62254 struct audit_context *context = task->audit_context;
62255
62256 if (context && context->in_syscall) {
62257diff --git a/kernel/capability.c b/kernel/capability.c
62258index b463871..fa3ea1f 100644
62259--- a/kernel/capability.c
62260+++ b/kernel/capability.c
62261@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62262 * before modification is attempted and the application
62263 * fails.
62264 */
62265+ if (tocopy > ARRAY_SIZE(kdata))
62266+ return -EFAULT;
62267+
62268 if (copy_to_user(dataptr, kdata, tocopy
62269 * sizeof(struct __user_cap_data_struct))) {
62270 return -EFAULT;
62271@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62272 BUG();
62273 }
62274
62275- if (security_capable(ns, current_cred(), cap) == 0) {
62276+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62277 current->flags |= PF_SUPERPRIV;
62278 return true;
62279 }
62280@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62281 }
62282 EXPORT_SYMBOL(ns_capable);
62283
62284+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62285+{
62286+ if (unlikely(!cap_valid(cap))) {
62287+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62288+ BUG();
62289+ }
62290+
62291+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62292+ current->flags |= PF_SUPERPRIV;
62293+ return true;
62294+ }
62295+ return false;
62296+}
62297+EXPORT_SYMBOL(ns_capable_nolog);
62298+
62299+bool capable_nolog(int cap)
62300+{
62301+ return ns_capable_nolog(&init_user_ns, cap);
62302+}
62303+EXPORT_SYMBOL(capable_nolog);
62304+
62305 /**
62306 * task_ns_capable - Determine whether current task has a superior
62307 * capability targeted at a specific task's user namespace.
62308@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62309 }
62310 EXPORT_SYMBOL(task_ns_capable);
62311
62312+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62313+{
62314+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62315+}
62316+EXPORT_SYMBOL(task_ns_capable_nolog);
62317+
62318 /**
62319 * nsown_capable - Check superior capability to one's own user_ns
62320 * @cap: The capability in question
62321diff --git a/kernel/compat.c b/kernel/compat.c
62322index f346ced..aa2b1f4 100644
62323--- a/kernel/compat.c
62324+++ b/kernel/compat.c
62325@@ -13,6 +13,7 @@
62326
62327 #include <linux/linkage.h>
62328 #include <linux/compat.h>
62329+#include <linux/module.h>
62330 #include <linux/errno.h>
62331 #include <linux/time.h>
62332 #include <linux/signal.h>
62333@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62334 mm_segment_t oldfs;
62335 long ret;
62336
62337- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62338+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62339 oldfs = get_fs();
62340 set_fs(KERNEL_DS);
62341 ret = hrtimer_nanosleep_restart(restart);
62342@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62343 oldfs = get_fs();
62344 set_fs(KERNEL_DS);
62345 ret = hrtimer_nanosleep(&tu,
62346- rmtp ? (struct timespec __user *)&rmt : NULL,
62347+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62348 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62349 set_fs(oldfs);
62350
62351@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62352 mm_segment_t old_fs = get_fs();
62353
62354 set_fs(KERNEL_DS);
62355- ret = sys_sigpending((old_sigset_t __user *) &s);
62356+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62357 set_fs(old_fs);
62358 if (ret == 0)
62359 ret = put_user(s, set);
62360@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62361 old_fs = get_fs();
62362 set_fs(KERNEL_DS);
62363 ret = sys_sigprocmask(how,
62364- set ? (old_sigset_t __user *) &s : NULL,
62365- oset ? (old_sigset_t __user *) &s : NULL);
62366+ set ? (old_sigset_t __force_user *) &s : NULL,
62367+ oset ? (old_sigset_t __force_user *) &s : NULL);
62368 set_fs(old_fs);
62369 if (ret == 0)
62370 if (oset)
62371@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62372 mm_segment_t old_fs = get_fs();
62373
62374 set_fs(KERNEL_DS);
62375- ret = sys_old_getrlimit(resource, &r);
62376+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62377 set_fs(old_fs);
62378
62379 if (!ret) {
62380@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62381 mm_segment_t old_fs = get_fs();
62382
62383 set_fs(KERNEL_DS);
62384- ret = sys_getrusage(who, (struct rusage __user *) &r);
62385+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62386 set_fs(old_fs);
62387
62388 if (ret)
62389@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62390 set_fs (KERNEL_DS);
62391 ret = sys_wait4(pid,
62392 (stat_addr ?
62393- (unsigned int __user *) &status : NULL),
62394- options, (struct rusage __user *) &r);
62395+ (unsigned int __force_user *) &status : NULL),
62396+ options, (struct rusage __force_user *) &r);
62397 set_fs (old_fs);
62398
62399 if (ret > 0) {
62400@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62401 memset(&info, 0, sizeof(info));
62402
62403 set_fs(KERNEL_DS);
62404- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62405- uru ? (struct rusage __user *)&ru : NULL);
62406+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62407+ uru ? (struct rusage __force_user *)&ru : NULL);
62408 set_fs(old_fs);
62409
62410 if ((ret < 0) || (info.si_signo == 0))
62411@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62412 oldfs = get_fs();
62413 set_fs(KERNEL_DS);
62414 err = sys_timer_settime(timer_id, flags,
62415- (struct itimerspec __user *) &newts,
62416- (struct itimerspec __user *) &oldts);
62417+ (struct itimerspec __force_user *) &newts,
62418+ (struct itimerspec __force_user *) &oldts);
62419 set_fs(oldfs);
62420 if (!err && old && put_compat_itimerspec(old, &oldts))
62421 return -EFAULT;
62422@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62423 oldfs = get_fs();
62424 set_fs(KERNEL_DS);
62425 err = sys_timer_gettime(timer_id,
62426- (struct itimerspec __user *) &ts);
62427+ (struct itimerspec __force_user *) &ts);
62428 set_fs(oldfs);
62429 if (!err && put_compat_itimerspec(setting, &ts))
62430 return -EFAULT;
62431@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62432 oldfs = get_fs();
62433 set_fs(KERNEL_DS);
62434 err = sys_clock_settime(which_clock,
62435- (struct timespec __user *) &ts);
62436+ (struct timespec __force_user *) &ts);
62437 set_fs(oldfs);
62438 return err;
62439 }
62440@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62441 oldfs = get_fs();
62442 set_fs(KERNEL_DS);
62443 err = sys_clock_gettime(which_clock,
62444- (struct timespec __user *) &ts);
62445+ (struct timespec __force_user *) &ts);
62446 set_fs(oldfs);
62447 if (!err && put_compat_timespec(&ts, tp))
62448 return -EFAULT;
62449@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62450
62451 oldfs = get_fs();
62452 set_fs(KERNEL_DS);
62453- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62454+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62455 set_fs(oldfs);
62456
62457 err = compat_put_timex(utp, &txc);
62458@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62459 oldfs = get_fs();
62460 set_fs(KERNEL_DS);
62461 err = sys_clock_getres(which_clock,
62462- (struct timespec __user *) &ts);
62463+ (struct timespec __force_user *) &ts);
62464 set_fs(oldfs);
62465 if (!err && tp && put_compat_timespec(&ts, tp))
62466 return -EFAULT;
62467@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62468 long err;
62469 mm_segment_t oldfs;
62470 struct timespec tu;
62471- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62472+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62473
62474- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62475+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62476 oldfs = get_fs();
62477 set_fs(KERNEL_DS);
62478 err = clock_nanosleep_restart(restart);
62479@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62480 oldfs = get_fs();
62481 set_fs(KERNEL_DS);
62482 err = sys_clock_nanosleep(which_clock, flags,
62483- (struct timespec __user *) &in,
62484- (struct timespec __user *) &out);
62485+ (struct timespec __force_user *) &in,
62486+ (struct timespec __force_user *) &out);
62487 set_fs(oldfs);
62488
62489 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62490diff --git a/kernel/configs.c b/kernel/configs.c
62491index 42e8fa0..9e7406b 100644
62492--- a/kernel/configs.c
62493+++ b/kernel/configs.c
62494@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62495 struct proc_dir_entry *entry;
62496
62497 /* create the current config file */
62498+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62499+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62500+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62501+ &ikconfig_file_ops);
62502+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62503+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62504+ &ikconfig_file_ops);
62505+#endif
62506+#else
62507 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62508 &ikconfig_file_ops);
62509+#endif
62510+
62511 if (!entry)
62512 return -ENOMEM;
62513
62514diff --git a/kernel/cred.c b/kernel/cred.c
62515index 5791612..a3c04dc 100644
62516--- a/kernel/cred.c
62517+++ b/kernel/cred.c
62518@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62519 validate_creds(cred);
62520 put_cred(cred);
62521 }
62522+
62523+#ifdef CONFIG_GRKERNSEC_SETXID
62524+ cred = (struct cred *) tsk->delayed_cred;
62525+ if (cred) {
62526+ tsk->delayed_cred = NULL;
62527+ validate_creds(cred);
62528+ put_cred(cred);
62529+ }
62530+#endif
62531 }
62532
62533 /**
62534@@ -470,7 +479,7 @@ error_put:
62535 * Always returns 0 thus allowing this function to be tail-called at the end
62536 * of, say, sys_setgid().
62537 */
62538-int commit_creds(struct cred *new)
62539+static int __commit_creds(struct cred *new)
62540 {
62541 struct task_struct *task = current;
62542 const struct cred *old = task->real_cred;
62543@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
62544
62545 get_cred(new); /* we will require a ref for the subj creds too */
62546
62547+ gr_set_role_label(task, new->uid, new->gid);
62548+
62549 /* dumpability changes */
62550 if (old->euid != new->euid ||
62551 old->egid != new->egid ||
62552@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
62553 put_cred(old);
62554 return 0;
62555 }
62556+#ifdef CONFIG_GRKERNSEC_SETXID
62557+extern int set_user(struct cred *new);
62558+
62559+void gr_delayed_cred_worker(void)
62560+{
62561+ const struct cred *new = current->delayed_cred;
62562+ struct cred *ncred;
62563+
62564+ current->delayed_cred = NULL;
62565+
62566+ if (current_uid() && new != NULL) {
62567+ // from doing get_cred on it when queueing this
62568+ put_cred(new);
62569+ return;
62570+ } else if (new == NULL)
62571+ return;
62572+
62573+ ncred = prepare_creds();
62574+ if (!ncred)
62575+ goto die;
62576+ // uids
62577+ ncred->uid = new->uid;
62578+ ncred->euid = new->euid;
62579+ ncred->suid = new->suid;
62580+ ncred->fsuid = new->fsuid;
62581+ // gids
62582+ ncred->gid = new->gid;
62583+ ncred->egid = new->egid;
62584+ ncred->sgid = new->sgid;
62585+ ncred->fsgid = new->fsgid;
62586+ // groups
62587+ if (set_groups(ncred, new->group_info) < 0) {
62588+ abort_creds(ncred);
62589+ goto die;
62590+ }
62591+ // caps
62592+ ncred->securebits = new->securebits;
62593+ ncred->cap_inheritable = new->cap_inheritable;
62594+ ncred->cap_permitted = new->cap_permitted;
62595+ ncred->cap_effective = new->cap_effective;
62596+ ncred->cap_bset = new->cap_bset;
62597+
62598+ if (set_user(ncred)) {
62599+ abort_creds(ncred);
62600+ goto die;
62601+ }
62602+
62603+ // from doing get_cred on it when queueing this
62604+ put_cred(new);
62605+
62606+ __commit_creds(ncred);
62607+ return;
62608+die:
62609+ // from doing get_cred on it when queueing this
62610+ put_cred(new);
62611+ do_group_exit(SIGKILL);
62612+}
62613+#endif
62614+
62615+int commit_creds(struct cred *new)
62616+{
62617+#ifdef CONFIG_GRKERNSEC_SETXID
62618+ struct task_struct *t;
62619+
62620+ /* we won't get called with tasklist_lock held for writing
62621+ and interrupts disabled as the cred struct in that case is
62622+ init_cred
62623+ */
62624+ if (grsec_enable_setxid && !current_is_single_threaded() &&
62625+ !current_uid() && new->uid) {
62626+ rcu_read_lock();
62627+ read_lock(&tasklist_lock);
62628+ for (t = next_thread(current); t != current;
62629+ t = next_thread(t)) {
62630+ if (t->delayed_cred == NULL) {
62631+ t->delayed_cred = get_cred(new);
62632+ set_tsk_need_resched(t);
62633+ }
62634+ }
62635+ read_unlock(&tasklist_lock);
62636+ rcu_read_unlock();
62637+ }
62638+#endif
62639+ return __commit_creds(new);
62640+}
62641+
62642 EXPORT_SYMBOL(commit_creds);
62643
62644 /**
62645diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
62646index 0d7c087..01b8cef 100644
62647--- a/kernel/debug/debug_core.c
62648+++ b/kernel/debug/debug_core.c
62649@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
62650 */
62651 static atomic_t masters_in_kgdb;
62652 static atomic_t slaves_in_kgdb;
62653-static atomic_t kgdb_break_tasklet_var;
62654+static atomic_unchecked_t kgdb_break_tasklet_var;
62655 atomic_t kgdb_setting_breakpoint;
62656
62657 struct task_struct *kgdb_usethread;
62658@@ -129,7 +129,7 @@ int kgdb_single_step;
62659 static pid_t kgdb_sstep_pid;
62660
62661 /* to keep track of the CPU which is doing the single stepping*/
62662-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62663+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62664
62665 /*
62666 * If you are debugging a problem where roundup (the collection of
62667@@ -542,7 +542,7 @@ return_normal:
62668 * kernel will only try for the value of sstep_tries before
62669 * giving up and continuing on.
62670 */
62671- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62672+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62673 (kgdb_info[cpu].task &&
62674 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62675 atomic_set(&kgdb_active, -1);
62676@@ -636,8 +636,8 @@ cpu_master_loop:
62677 }
62678
62679 kgdb_restore:
62680- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62681- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62682+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62683+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62684 if (kgdb_info[sstep_cpu].task)
62685 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62686 else
62687@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
62688 static void kgdb_tasklet_bpt(unsigned long ing)
62689 {
62690 kgdb_breakpoint();
62691- atomic_set(&kgdb_break_tasklet_var, 0);
62692+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62693 }
62694
62695 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62696
62697 void kgdb_schedule_breakpoint(void)
62698 {
62699- if (atomic_read(&kgdb_break_tasklet_var) ||
62700+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62701 atomic_read(&kgdb_active) != -1 ||
62702 atomic_read(&kgdb_setting_breakpoint))
62703 return;
62704- atomic_inc(&kgdb_break_tasklet_var);
62705+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62706 tasklet_schedule(&kgdb_tasklet_breakpoint);
62707 }
62708 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62709diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
62710index 63786e7..0780cac 100644
62711--- a/kernel/debug/kdb/kdb_main.c
62712+++ b/kernel/debug/kdb/kdb_main.c
62713@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
62714 list_for_each_entry(mod, kdb_modules, list) {
62715
62716 kdb_printf("%-20s%8u 0x%p ", mod->name,
62717- mod->core_size, (void *)mod);
62718+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62719 #ifdef CONFIG_MODULE_UNLOAD
62720 kdb_printf("%4d ", module_refcount(mod));
62721 #endif
62722@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
62723 kdb_printf(" (Loading)");
62724 else
62725 kdb_printf(" (Live)");
62726- kdb_printf(" 0x%p", mod->module_core);
62727+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62728
62729 #ifdef CONFIG_MODULE_UNLOAD
62730 {
62731diff --git a/kernel/events/core.c b/kernel/events/core.c
62732index 58690af..d903d75 100644
62733--- a/kernel/events/core.c
62734+++ b/kernel/events/core.c
62735@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
62736 return 0;
62737 }
62738
62739-static atomic64_t perf_event_id;
62740+static atomic64_unchecked_t perf_event_id;
62741
62742 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62743 enum event_type_t event_type);
62744@@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
62745
62746 static inline u64 perf_event_count(struct perf_event *event)
62747 {
62748- return local64_read(&event->count) + atomic64_read(&event->child_count);
62749+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62750 }
62751
62752 static u64 perf_event_read(struct perf_event *event)
62753@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
62754 mutex_lock(&event->child_mutex);
62755 total += perf_event_read(event);
62756 *enabled += event->total_time_enabled +
62757- atomic64_read(&event->child_total_time_enabled);
62758+ atomic64_read_unchecked(&event->child_total_time_enabled);
62759 *running += event->total_time_running +
62760- atomic64_read(&event->child_total_time_running);
62761+ atomic64_read_unchecked(&event->child_total_time_running);
62762
62763 list_for_each_entry(child, &event->child_list, child_list) {
62764 total += perf_event_read(child);
62765@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
62766 userpg->offset -= local64_read(&event->hw.prev_count);
62767
62768 userpg->time_enabled = enabled +
62769- atomic64_read(&event->child_total_time_enabled);
62770+ atomic64_read_unchecked(&event->child_total_time_enabled);
62771
62772 userpg->time_running = running +
62773- atomic64_read(&event->child_total_time_running);
62774+ atomic64_read_unchecked(&event->child_total_time_running);
62775
62776 barrier();
62777 ++userpg->lock;
62778@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
62779 values[n++] = perf_event_count(event);
62780 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62781 values[n++] = enabled +
62782- atomic64_read(&event->child_total_time_enabled);
62783+ atomic64_read_unchecked(&event->child_total_time_enabled);
62784 }
62785 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62786 values[n++] = running +
62787- atomic64_read(&event->child_total_time_running);
62788+ atomic64_read_unchecked(&event->child_total_time_running);
62789 }
62790 if (read_format & PERF_FORMAT_ID)
62791 values[n++] = primary_event_id(event);
62792@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
62793 * need to add enough zero bytes after the string to handle
62794 * the 64bit alignment we do later.
62795 */
62796- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62797+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62798 if (!buf) {
62799 name = strncpy(tmp, "//enomem", sizeof(tmp));
62800 goto got_name;
62801 }
62802- name = d_path(&file->f_path, buf, PATH_MAX);
62803+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62804 if (IS_ERR(name)) {
62805 name = strncpy(tmp, "//toolong", sizeof(tmp));
62806 goto got_name;
62807@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
62808 event->parent = parent_event;
62809
62810 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62811- event->id = atomic64_inc_return(&perf_event_id);
62812+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62813
62814 event->state = PERF_EVENT_STATE_INACTIVE;
62815
62816@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
62817 /*
62818 * Add back the child's count to the parent's count:
62819 */
62820- atomic64_add(child_val, &parent_event->child_count);
62821- atomic64_add(child_event->total_time_enabled,
62822+ atomic64_add_unchecked(child_val, &parent_event->child_count);
62823+ atomic64_add_unchecked(child_event->total_time_enabled,
62824 &parent_event->child_total_time_enabled);
62825- atomic64_add(child_event->total_time_running,
62826+ atomic64_add_unchecked(child_event->total_time_running,
62827 &parent_event->child_total_time_running);
62828
62829 /*
62830diff --git a/kernel/exit.c b/kernel/exit.c
62831index e6e01b9..619f837 100644
62832--- a/kernel/exit.c
62833+++ b/kernel/exit.c
62834@@ -57,6 +57,10 @@
62835 #include <asm/pgtable.h>
62836 #include <asm/mmu_context.h>
62837
62838+#ifdef CONFIG_GRKERNSEC
62839+extern rwlock_t grsec_exec_file_lock;
62840+#endif
62841+
62842 static void exit_mm(struct task_struct * tsk);
62843
62844 static void __unhash_process(struct task_struct *p, bool group_dead)
62845@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
62846 struct task_struct *leader;
62847 int zap_leader;
62848 repeat:
62849+#ifdef CONFIG_NET
62850+ gr_del_task_from_ip_table(p);
62851+#endif
62852+
62853 /* don't need to get the RCU readlock here - the process is dead and
62854 * can't be modifying its own credentials. But shut RCU-lockdep up */
62855 rcu_read_lock();
62856@@ -380,7 +388,7 @@ int allow_signal(int sig)
62857 * know it'll be handled, so that they don't get converted to
62858 * SIGKILL or just silently dropped.
62859 */
62860- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62861+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62862 recalc_sigpending();
62863 spin_unlock_irq(&current->sighand->siglock);
62864 return 0;
62865@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
62866 vsnprintf(current->comm, sizeof(current->comm), name, args);
62867 va_end(args);
62868
62869+#ifdef CONFIG_GRKERNSEC
62870+ write_lock(&grsec_exec_file_lock);
62871+ if (current->exec_file) {
62872+ fput(current->exec_file);
62873+ current->exec_file = NULL;
62874+ }
62875+ write_unlock(&grsec_exec_file_lock);
62876+#endif
62877+
62878+ gr_set_kernel_label(current);
62879+
62880 /*
62881 * If we were started as result of loading a module, close all of the
62882 * user space pages. We don't need them, and if we didn't close them
62883@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
62884 struct task_struct *tsk = current;
62885 int group_dead;
62886
62887+ set_fs(USER_DS);
62888+
62889 profile_task_exit(tsk);
62890
62891 WARN_ON(blk_needs_flush_plug(tsk));
62892@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
62893 * mm_release()->clear_child_tid() from writing to a user-controlled
62894 * kernel address.
62895 */
62896- set_fs(USER_DS);
62897
62898 ptrace_event(PTRACE_EVENT_EXIT, code);
62899
62900@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
62901 tsk->exit_code = code;
62902 taskstats_exit(tsk, group_dead);
62903
62904+ gr_acl_handle_psacct(tsk, code);
62905+ gr_acl_handle_exit();
62906+
62907 exit_mm(tsk);
62908
62909 if (group_dead)
62910diff --git a/kernel/fork.c b/kernel/fork.c
62911index da4a6a1..c04943c 100644
62912--- a/kernel/fork.c
62913+++ b/kernel/fork.c
62914@@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
62915 *stackend = STACK_END_MAGIC; /* for overflow detection */
62916
62917 #ifdef CONFIG_CC_STACKPROTECTOR
62918- tsk->stack_canary = get_random_int();
62919+ tsk->stack_canary = pax_get_random_long();
62920 #endif
62921
62922 /*
62923@@ -304,13 +304,77 @@ out:
62924 }
62925
62926 #ifdef CONFIG_MMU
62927+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
62928+{
62929+ struct vm_area_struct *tmp;
62930+ unsigned long charge;
62931+ struct mempolicy *pol;
62932+ struct file *file;
62933+
62934+ charge = 0;
62935+ if (mpnt->vm_flags & VM_ACCOUNT) {
62936+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62937+ if (security_vm_enough_memory(len))
62938+ goto fail_nomem;
62939+ charge = len;
62940+ }
62941+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62942+ if (!tmp)
62943+ goto fail_nomem;
62944+ *tmp = *mpnt;
62945+ tmp->vm_mm = mm;
62946+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
62947+ pol = mpol_dup(vma_policy(mpnt));
62948+ if (IS_ERR(pol))
62949+ goto fail_nomem_policy;
62950+ vma_set_policy(tmp, pol);
62951+ if (anon_vma_fork(tmp, mpnt))
62952+ goto fail_nomem_anon_vma_fork;
62953+ tmp->vm_flags &= ~VM_LOCKED;
62954+ tmp->vm_next = tmp->vm_prev = NULL;
62955+ tmp->vm_mirror = NULL;
62956+ file = tmp->vm_file;
62957+ if (file) {
62958+ struct inode *inode = file->f_path.dentry->d_inode;
62959+ struct address_space *mapping = file->f_mapping;
62960+
62961+ get_file(file);
62962+ if (tmp->vm_flags & VM_DENYWRITE)
62963+ atomic_dec(&inode->i_writecount);
62964+ mutex_lock(&mapping->i_mmap_mutex);
62965+ if (tmp->vm_flags & VM_SHARED)
62966+ mapping->i_mmap_writable++;
62967+ flush_dcache_mmap_lock(mapping);
62968+ /* insert tmp into the share list, just after mpnt */
62969+ vma_prio_tree_add(tmp, mpnt);
62970+ flush_dcache_mmap_unlock(mapping);
62971+ mutex_unlock(&mapping->i_mmap_mutex);
62972+ }
62973+
62974+ /*
62975+ * Clear hugetlb-related page reserves for children. This only
62976+ * affects MAP_PRIVATE mappings. Faults generated by the child
62977+ * are not guaranteed to succeed, even if read-only
62978+ */
62979+ if (is_vm_hugetlb_page(tmp))
62980+ reset_vma_resv_huge_pages(tmp);
62981+
62982+ return tmp;
62983+
62984+fail_nomem_anon_vma_fork:
62985+ mpol_put(pol);
62986+fail_nomem_policy:
62987+ kmem_cache_free(vm_area_cachep, tmp);
62988+fail_nomem:
62989+ vm_unacct_memory(charge);
62990+ return NULL;
62991+}
62992+
62993 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
62994 {
62995 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
62996 struct rb_node **rb_link, *rb_parent;
62997 int retval;
62998- unsigned long charge;
62999- struct mempolicy *pol;
63000
63001 down_write(&oldmm->mmap_sem);
63002 flush_cache_dup_mm(oldmm);
63003@@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63004 mm->locked_vm = 0;
63005 mm->mmap = NULL;
63006 mm->mmap_cache = NULL;
63007- mm->free_area_cache = oldmm->mmap_base;
63008- mm->cached_hole_size = ~0UL;
63009+ mm->free_area_cache = oldmm->free_area_cache;
63010+ mm->cached_hole_size = oldmm->cached_hole_size;
63011 mm->map_count = 0;
63012 cpumask_clear(mm_cpumask(mm));
63013 mm->mm_rb = RB_ROOT;
63014@@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63015
63016 prev = NULL;
63017 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63018- struct file *file;
63019-
63020 if (mpnt->vm_flags & VM_DONTCOPY) {
63021 long pages = vma_pages(mpnt);
63022 mm->total_vm -= pages;
63023@@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63024 -pages);
63025 continue;
63026 }
63027- charge = 0;
63028- if (mpnt->vm_flags & VM_ACCOUNT) {
63029- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63030- if (security_vm_enough_memory(len))
63031- goto fail_nomem;
63032- charge = len;
63033+ tmp = dup_vma(mm, mpnt);
63034+ if (!tmp) {
63035+ retval = -ENOMEM;
63036+ goto out;
63037 }
63038- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63039- if (!tmp)
63040- goto fail_nomem;
63041- *tmp = *mpnt;
63042- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63043- pol = mpol_dup(vma_policy(mpnt));
63044- retval = PTR_ERR(pol);
63045- if (IS_ERR(pol))
63046- goto fail_nomem_policy;
63047- vma_set_policy(tmp, pol);
63048- tmp->vm_mm = mm;
63049- if (anon_vma_fork(tmp, mpnt))
63050- goto fail_nomem_anon_vma_fork;
63051- tmp->vm_flags &= ~VM_LOCKED;
63052- tmp->vm_next = tmp->vm_prev = NULL;
63053- file = tmp->vm_file;
63054- if (file) {
63055- struct inode *inode = file->f_path.dentry->d_inode;
63056- struct address_space *mapping = file->f_mapping;
63057-
63058- get_file(file);
63059- if (tmp->vm_flags & VM_DENYWRITE)
63060- atomic_dec(&inode->i_writecount);
63061- mutex_lock(&mapping->i_mmap_mutex);
63062- if (tmp->vm_flags & VM_SHARED)
63063- mapping->i_mmap_writable++;
63064- flush_dcache_mmap_lock(mapping);
63065- /* insert tmp into the share list, just after mpnt */
63066- vma_prio_tree_add(tmp, mpnt);
63067- flush_dcache_mmap_unlock(mapping);
63068- mutex_unlock(&mapping->i_mmap_mutex);
63069- }
63070-
63071- /*
63072- * Clear hugetlb-related page reserves for children. This only
63073- * affects MAP_PRIVATE mappings. Faults generated by the child
63074- * are not guaranteed to succeed, even if read-only
63075- */
63076- if (is_vm_hugetlb_page(tmp))
63077- reset_vma_resv_huge_pages(tmp);
63078
63079 /*
63080 * Link in the new vma and copy the page table entries.
63081@@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63082 if (retval)
63083 goto out;
63084 }
63085+
63086+#ifdef CONFIG_PAX_SEGMEXEC
63087+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63088+ struct vm_area_struct *mpnt_m;
63089+
63090+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63091+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63092+
63093+ if (!mpnt->vm_mirror)
63094+ continue;
63095+
63096+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63097+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63098+ mpnt->vm_mirror = mpnt_m;
63099+ } else {
63100+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63101+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63102+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63103+ mpnt->vm_mirror->vm_mirror = mpnt;
63104+ }
63105+ }
63106+ BUG_ON(mpnt_m);
63107+ }
63108+#endif
63109+
63110 /* a new mm has just been created */
63111 arch_dup_mmap(oldmm, mm);
63112 retval = 0;
63113@@ -425,14 +470,6 @@ out:
63114 flush_tlb_mm(oldmm);
63115 up_write(&oldmm->mmap_sem);
63116 return retval;
63117-fail_nomem_anon_vma_fork:
63118- mpol_put(pol);
63119-fail_nomem_policy:
63120- kmem_cache_free(vm_area_cachep, tmp);
63121-fail_nomem:
63122- retval = -ENOMEM;
63123- vm_unacct_memory(charge);
63124- goto out;
63125 }
63126
63127 static inline int mm_alloc_pgd(struct mm_struct *mm)
63128@@ -829,13 +866,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63129 spin_unlock(&fs->lock);
63130 return -EAGAIN;
63131 }
63132- fs->users++;
63133+ atomic_inc(&fs->users);
63134 spin_unlock(&fs->lock);
63135 return 0;
63136 }
63137 tsk->fs = copy_fs_struct(fs);
63138 if (!tsk->fs)
63139 return -ENOMEM;
63140+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63141 return 0;
63142 }
63143
63144@@ -1097,6 +1135,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63145 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63146 #endif
63147 retval = -EAGAIN;
63148+
63149+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63150+
63151 if (atomic_read(&p->real_cred->user->processes) >=
63152 task_rlimit(p, RLIMIT_NPROC)) {
63153 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63154@@ -1256,6 +1297,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63155 if (clone_flags & CLONE_THREAD)
63156 p->tgid = current->tgid;
63157
63158+ gr_copy_label(p);
63159+
63160 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63161 /*
63162 * Clear TID on mm_release()?
63163@@ -1418,6 +1461,8 @@ bad_fork_cleanup_count:
63164 bad_fork_free:
63165 free_task(p);
63166 fork_out:
63167+ gr_log_forkfail(retval);
63168+
63169 return ERR_PTR(retval);
63170 }
63171
63172@@ -1518,6 +1563,8 @@ long do_fork(unsigned long clone_flags,
63173 if (clone_flags & CLONE_PARENT_SETTID)
63174 put_user(nr, parent_tidptr);
63175
63176+ gr_handle_brute_check();
63177+
63178 if (clone_flags & CLONE_VFORK) {
63179 p->vfork_done = &vfork;
63180 init_completion(&vfork);
63181@@ -1627,7 +1674,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63182 return 0;
63183
63184 /* don't need lock here; in the worst case we'll do useless copy */
63185- if (fs->users == 1)
63186+ if (atomic_read(&fs->users) == 1)
63187 return 0;
63188
63189 *new_fsp = copy_fs_struct(fs);
63190@@ -1716,7 +1763,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63191 fs = current->fs;
63192 spin_lock(&fs->lock);
63193 current->fs = new_fs;
63194- if (--fs->users)
63195+ gr_set_chroot_entries(current, &current->fs->root);
63196+ if (atomic_dec_return(&fs->users))
63197 new_fs = NULL;
63198 else
63199 new_fs = fs;
63200diff --git a/kernel/futex.c b/kernel/futex.c
63201index 1614be2..37abc7e 100644
63202--- a/kernel/futex.c
63203+++ b/kernel/futex.c
63204@@ -54,6 +54,7 @@
63205 #include <linux/mount.h>
63206 #include <linux/pagemap.h>
63207 #include <linux/syscalls.h>
63208+#include <linux/ptrace.h>
63209 #include <linux/signal.h>
63210 #include <linux/export.h>
63211 #include <linux/magic.h>
63212@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63213 struct page *page, *page_head;
63214 int err, ro = 0;
63215
63216+#ifdef CONFIG_PAX_SEGMEXEC
63217+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63218+ return -EFAULT;
63219+#endif
63220+
63221 /*
63222 * The futex address must be "naturally" aligned.
63223 */
63224@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63225 if (!p)
63226 goto err_unlock;
63227 ret = -EPERM;
63228+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63229+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63230+ goto err_unlock;
63231+#endif
63232 pcred = __task_cred(p);
63233 /* If victim is in different user_ns, then uids are not
63234 comparable, so we must have CAP_SYS_PTRACE */
63235@@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63236 {
63237 u32 curval;
63238 int i;
63239+ mm_segment_t oldfs;
63240
63241 /*
63242 * This will fail and we want it. Some arch implementations do
63243@@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63244 * implementation, the non-functional ones will return
63245 * -ENOSYS.
63246 */
63247+ oldfs = get_fs();
63248+ set_fs(USER_DS);
63249 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63250 futex_cmpxchg_enabled = 1;
63251+ set_fs(oldfs);
63252
63253 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63254 plist_head_init(&futex_queues[i].chain);
63255diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63256index 5f9e689..582d46d 100644
63257--- a/kernel/futex_compat.c
63258+++ b/kernel/futex_compat.c
63259@@ -10,6 +10,7 @@
63260 #include <linux/compat.h>
63261 #include <linux/nsproxy.h>
63262 #include <linux/futex.h>
63263+#include <linux/ptrace.h>
63264
63265 #include <asm/uaccess.h>
63266
63267@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63268 {
63269 struct compat_robust_list_head __user *head;
63270 unsigned long ret;
63271- const struct cred *cred = current_cred(), *pcred;
63272+ const struct cred *cred = current_cred();
63273+ const struct cred *pcred;
63274
63275 if (!futex_cmpxchg_enabled)
63276 return -ENOSYS;
63277@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63278 if (!p)
63279 goto err_unlock;
63280 ret = -EPERM;
63281+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63282+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63283+ goto err_unlock;
63284+#endif
63285 pcred = __task_cred(p);
63286 /* If victim is in different user_ns, then uids are not
63287 comparable, so we must have CAP_SYS_PTRACE */
63288diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63289index 9b22d03..6295b62 100644
63290--- a/kernel/gcov/base.c
63291+++ b/kernel/gcov/base.c
63292@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63293 }
63294
63295 #ifdef CONFIG_MODULES
63296-static inline int within(void *addr, void *start, unsigned long size)
63297-{
63298- return ((addr >= start) && (addr < start + size));
63299-}
63300-
63301 /* Update list and generate events when modules are unloaded. */
63302 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63303 void *data)
63304@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63305 prev = NULL;
63306 /* Remove entries located in module from linked list. */
63307 for (info = gcov_info_head; info; info = info->next) {
63308- if (within(info, mod->module_core, mod->core_size)) {
63309+ if (within_module_core_rw((unsigned long)info, mod)) {
63310 if (prev)
63311 prev->next = info->next;
63312 else
63313diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63314index ae34bf5..4e2f3d0 100644
63315--- a/kernel/hrtimer.c
63316+++ b/kernel/hrtimer.c
63317@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63318 local_irq_restore(flags);
63319 }
63320
63321-static void run_hrtimer_softirq(struct softirq_action *h)
63322+static void run_hrtimer_softirq(void)
63323 {
63324 hrtimer_peek_ahead_timers();
63325 }
63326diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63327index 66ff710..05a5128 100644
63328--- a/kernel/jump_label.c
63329+++ b/kernel/jump_label.c
63330@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63331
63332 size = (((unsigned long)stop - (unsigned long)start)
63333 / sizeof(struct jump_entry));
63334+ pax_open_kernel();
63335 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63336+ pax_close_kernel();
63337 }
63338
63339 static void jump_label_update(struct jump_label_key *key, int enable);
63340@@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63341 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63342 struct jump_entry *iter;
63343
63344+ pax_open_kernel();
63345 for (iter = iter_start; iter < iter_stop; iter++) {
63346 if (within_module_init(iter->code, mod))
63347 iter->code = 0;
63348 }
63349+ pax_close_kernel();
63350 }
63351
63352 static int
63353diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63354index 079f1d3..a407562 100644
63355--- a/kernel/kallsyms.c
63356+++ b/kernel/kallsyms.c
63357@@ -11,6 +11,9 @@
63358 * Changed the compression method from stem compression to "table lookup"
63359 * compression (see scripts/kallsyms.c for a more complete description)
63360 */
63361+#ifdef CONFIG_GRKERNSEC_HIDESYM
63362+#define __INCLUDED_BY_HIDESYM 1
63363+#endif
63364 #include <linux/kallsyms.h>
63365 #include <linux/module.h>
63366 #include <linux/init.h>
63367@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63368
63369 static inline int is_kernel_inittext(unsigned long addr)
63370 {
63371+ if (system_state != SYSTEM_BOOTING)
63372+ return 0;
63373+
63374 if (addr >= (unsigned long)_sinittext
63375 && addr <= (unsigned long)_einittext)
63376 return 1;
63377 return 0;
63378 }
63379
63380+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63381+#ifdef CONFIG_MODULES
63382+static inline int is_module_text(unsigned long addr)
63383+{
63384+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63385+ return 1;
63386+
63387+ addr = ktla_ktva(addr);
63388+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63389+}
63390+#else
63391+static inline int is_module_text(unsigned long addr)
63392+{
63393+ return 0;
63394+}
63395+#endif
63396+#endif
63397+
63398 static inline int is_kernel_text(unsigned long addr)
63399 {
63400 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63401@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63402
63403 static inline int is_kernel(unsigned long addr)
63404 {
63405+
63406+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63407+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63408+ return 1;
63409+
63410+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63411+#else
63412 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63413+#endif
63414+
63415 return 1;
63416 return in_gate_area_no_mm(addr);
63417 }
63418
63419 static int is_ksym_addr(unsigned long addr)
63420 {
63421+
63422+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63423+ if (is_module_text(addr))
63424+ return 0;
63425+#endif
63426+
63427 if (all_var)
63428 return is_kernel(addr);
63429
63430@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63431
63432 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63433 {
63434- iter->name[0] = '\0';
63435 iter->nameoff = get_symbol_offset(new_pos);
63436 iter->pos = new_pos;
63437 }
63438@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63439 {
63440 struct kallsym_iter *iter = m->private;
63441
63442+#ifdef CONFIG_GRKERNSEC_HIDESYM
63443+ if (current_uid())
63444+ return 0;
63445+#endif
63446+
63447 /* Some debugging symbols have no name. Ignore them. */
63448 if (!iter->name[0])
63449 return 0;
63450@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63451 struct kallsym_iter *iter;
63452 int ret;
63453
63454- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63455+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63456 if (!iter)
63457 return -ENOMEM;
63458 reset_iter(iter, 0);
63459diff --git a/kernel/kexec.c b/kernel/kexec.c
63460index dc7bc08..4601964 100644
63461--- a/kernel/kexec.c
63462+++ b/kernel/kexec.c
63463@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63464 unsigned long flags)
63465 {
63466 struct compat_kexec_segment in;
63467- struct kexec_segment out, __user *ksegments;
63468+ struct kexec_segment out;
63469+ struct kexec_segment __user *ksegments;
63470 unsigned long i, result;
63471
63472 /* Don't allow clients that don't understand the native
63473diff --git a/kernel/kmod.c b/kernel/kmod.c
63474index a4bea97..7a1ae9a 100644
63475--- a/kernel/kmod.c
63476+++ b/kernel/kmod.c
63477@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63478 * If module auto-loading support is disabled then this function
63479 * becomes a no-operation.
63480 */
63481-int __request_module(bool wait, const char *fmt, ...)
63482+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63483 {
63484- va_list args;
63485 char module_name[MODULE_NAME_LEN];
63486 unsigned int max_modprobes;
63487 int ret;
63488- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63489+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63490 static char *envp[] = { "HOME=/",
63491 "TERM=linux",
63492 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63493@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63494 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63495 static int kmod_loop_msg;
63496
63497- va_start(args, fmt);
63498- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63499- va_end(args);
63500+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63501 if (ret >= MODULE_NAME_LEN)
63502 return -ENAMETOOLONG;
63503
63504@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63505 if (ret)
63506 return ret;
63507
63508+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63509+ if (!current_uid()) {
63510+ /* hack to workaround consolekit/udisks stupidity */
63511+ read_lock(&tasklist_lock);
63512+ if (!strcmp(current->comm, "mount") &&
63513+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63514+ read_unlock(&tasklist_lock);
63515+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63516+ return -EPERM;
63517+ }
63518+ read_unlock(&tasklist_lock);
63519+ }
63520+#endif
63521+
63522 /* If modprobe needs a service that is in a module, we get a recursive
63523 * loop. Limit the number of running kmod threads to max_threads/2 or
63524 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63525@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
63526 atomic_dec(&kmod_concurrent);
63527 return ret;
63528 }
63529+
63530+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63531+{
63532+ va_list args;
63533+ int ret;
63534+
63535+ va_start(args, fmt);
63536+ ret = ____request_module(wait, module_param, fmt, args);
63537+ va_end(args);
63538+
63539+ return ret;
63540+}
63541+
63542+int __request_module(bool wait, const char *fmt, ...)
63543+{
63544+ va_list args;
63545+ int ret;
63546+
63547+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63548+ if (current_uid()) {
63549+ char module_param[MODULE_NAME_LEN];
63550+
63551+ memset(module_param, 0, sizeof(module_param));
63552+
63553+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63554+
63555+ va_start(args, fmt);
63556+ ret = ____request_module(wait, module_param, fmt, args);
63557+ va_end(args);
63558+
63559+ return ret;
63560+ }
63561+#endif
63562+
63563+ va_start(args, fmt);
63564+ ret = ____request_module(wait, NULL, fmt, args);
63565+ va_end(args);
63566+
63567+ return ret;
63568+}
63569+
63570 EXPORT_SYMBOL(__request_module);
63571 #endif /* CONFIG_MODULES */
63572
63573@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63574 *
63575 * Thus the __user pointer cast is valid here.
63576 */
63577- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63578+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63579
63580 /*
63581 * If ret is 0, either ____call_usermodehelper failed and the
63582diff --git a/kernel/kprobes.c b/kernel/kprobes.c
63583index 52fd049..3def6a8 100644
63584--- a/kernel/kprobes.c
63585+++ b/kernel/kprobes.c
63586@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
63587 * kernel image and loaded module images reside. This is required
63588 * so x86_64 can correctly handle the %rip-relative fixups.
63589 */
63590- kip->insns = module_alloc(PAGE_SIZE);
63591+ kip->insns = module_alloc_exec(PAGE_SIZE);
63592 if (!kip->insns) {
63593 kfree(kip);
63594 return NULL;
63595@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
63596 */
63597 if (!list_is_singular(&kip->list)) {
63598 list_del(&kip->list);
63599- module_free(NULL, kip->insns);
63600+ module_free_exec(NULL, kip->insns);
63601 kfree(kip);
63602 }
63603 return 1;
63604@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
63605 {
63606 int i, err = 0;
63607 unsigned long offset = 0, size = 0;
63608- char *modname, namebuf[128];
63609+ char *modname, namebuf[KSYM_NAME_LEN];
63610 const char *symbol_name;
63611 void *addr;
63612 struct kprobe_blackpoint *kb;
63613@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
63614 const char *sym = NULL;
63615 unsigned int i = *(loff_t *) v;
63616 unsigned long offset = 0;
63617- char *modname, namebuf[128];
63618+ char *modname, namebuf[KSYM_NAME_LEN];
63619
63620 head = &kprobe_table[i];
63621 preempt_disable();
63622diff --git a/kernel/lockdep.c b/kernel/lockdep.c
63623index b2e08c9..01d8049 100644
63624--- a/kernel/lockdep.c
63625+++ b/kernel/lockdep.c
63626@@ -592,6 +592,10 @@ static int static_obj(void *obj)
63627 end = (unsigned long) &_end,
63628 addr = (unsigned long) obj;
63629
63630+#ifdef CONFIG_PAX_KERNEXEC
63631+ start = ktla_ktva(start);
63632+#endif
63633+
63634 /*
63635 * static variable?
63636 */
63637@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
63638 if (!static_obj(lock->key)) {
63639 debug_locks_off();
63640 printk("INFO: trying to register non-static key.\n");
63641+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63642 printk("the code is fine but needs lockdep annotation.\n");
63643 printk("turning off the locking correctness validator.\n");
63644 dump_stack();
63645@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
63646 if (!class)
63647 return 0;
63648 }
63649- atomic_inc((atomic_t *)&class->ops);
63650+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63651 if (very_verbose(class)) {
63652 printk("\nacquire class [%p] %s", class->key, class->name);
63653 if (class->name_version > 1)
63654diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
63655index 91c32a0..b2c71c5 100644
63656--- a/kernel/lockdep_proc.c
63657+++ b/kernel/lockdep_proc.c
63658@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
63659
63660 static void print_name(struct seq_file *m, struct lock_class *class)
63661 {
63662- char str[128];
63663+ char str[KSYM_NAME_LEN];
63664 const char *name = class->name;
63665
63666 if (!name) {
63667diff --git a/kernel/module.c b/kernel/module.c
63668index 178333c..04e3408 100644
63669--- a/kernel/module.c
63670+++ b/kernel/module.c
63671@@ -58,6 +58,7 @@
63672 #include <linux/jump_label.h>
63673 #include <linux/pfn.h>
63674 #include <linux/bsearch.h>
63675+#include <linux/grsecurity.h>
63676
63677 #define CREATE_TRACE_POINTS
63678 #include <trace/events/module.h>
63679@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63680
63681 /* Bounds of module allocation, for speeding __module_address.
63682 * Protected by module_mutex. */
63683-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63684+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63685+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63686
63687 int register_module_notifier(struct notifier_block * nb)
63688 {
63689@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
63690 return true;
63691
63692 list_for_each_entry_rcu(mod, &modules, list) {
63693- struct symsearch arr[] = {
63694+ struct symsearch modarr[] = {
63695 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63696 NOT_GPL_ONLY, false },
63697 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63698@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
63699 #endif
63700 };
63701
63702- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63703+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63704 return true;
63705 }
63706 return false;
63707@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
63708 static int percpu_modalloc(struct module *mod,
63709 unsigned long size, unsigned long align)
63710 {
63711- if (align > PAGE_SIZE) {
63712+ if (align-1 >= PAGE_SIZE) {
63713 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63714 mod->name, align, PAGE_SIZE);
63715 align = PAGE_SIZE;
63716@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
63717 */
63718 #ifdef CONFIG_SYSFS
63719
63720-#ifdef CONFIG_KALLSYMS
63721+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63722 static inline bool sect_empty(const Elf_Shdr *sect)
63723 {
63724 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63725@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
63726
63727 static void unset_module_core_ro_nx(struct module *mod)
63728 {
63729- set_page_attributes(mod->module_core + mod->core_text_size,
63730- mod->module_core + mod->core_size,
63731+ set_page_attributes(mod->module_core_rw,
63732+ mod->module_core_rw + mod->core_size_rw,
63733 set_memory_x);
63734- set_page_attributes(mod->module_core,
63735- mod->module_core + mod->core_ro_size,
63736+ set_page_attributes(mod->module_core_rx,
63737+ mod->module_core_rx + mod->core_size_rx,
63738 set_memory_rw);
63739 }
63740
63741 static void unset_module_init_ro_nx(struct module *mod)
63742 {
63743- set_page_attributes(mod->module_init + mod->init_text_size,
63744- mod->module_init + mod->init_size,
63745+ set_page_attributes(mod->module_init_rw,
63746+ mod->module_init_rw + mod->init_size_rw,
63747 set_memory_x);
63748- set_page_attributes(mod->module_init,
63749- mod->module_init + mod->init_ro_size,
63750+ set_page_attributes(mod->module_init_rx,
63751+ mod->module_init_rx + mod->init_size_rx,
63752 set_memory_rw);
63753 }
63754
63755@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
63756
63757 mutex_lock(&module_mutex);
63758 list_for_each_entry_rcu(mod, &modules, list) {
63759- if ((mod->module_core) && (mod->core_text_size)) {
63760- set_page_attributes(mod->module_core,
63761- mod->module_core + mod->core_text_size,
63762+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63763+ set_page_attributes(mod->module_core_rx,
63764+ mod->module_core_rx + mod->core_size_rx,
63765 set_memory_rw);
63766 }
63767- if ((mod->module_init) && (mod->init_text_size)) {
63768- set_page_attributes(mod->module_init,
63769- mod->module_init + mod->init_text_size,
63770+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63771+ set_page_attributes(mod->module_init_rx,
63772+ mod->module_init_rx + mod->init_size_rx,
63773 set_memory_rw);
63774 }
63775 }
63776@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
63777
63778 mutex_lock(&module_mutex);
63779 list_for_each_entry_rcu(mod, &modules, list) {
63780- if ((mod->module_core) && (mod->core_text_size)) {
63781- set_page_attributes(mod->module_core,
63782- mod->module_core + mod->core_text_size,
63783+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63784+ set_page_attributes(mod->module_core_rx,
63785+ mod->module_core_rx + mod->core_size_rx,
63786 set_memory_ro);
63787 }
63788- if ((mod->module_init) && (mod->init_text_size)) {
63789- set_page_attributes(mod->module_init,
63790- mod->module_init + mod->init_text_size,
63791+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63792+ set_page_attributes(mod->module_init_rx,
63793+ mod->module_init_rx + mod->init_size_rx,
63794 set_memory_ro);
63795 }
63796 }
63797@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
63798
63799 /* This may be NULL, but that's OK */
63800 unset_module_init_ro_nx(mod);
63801- module_free(mod, mod->module_init);
63802+ module_free(mod, mod->module_init_rw);
63803+ module_free_exec(mod, mod->module_init_rx);
63804 kfree(mod->args);
63805 percpu_modfree(mod);
63806
63807 /* Free lock-classes: */
63808- lockdep_free_key_range(mod->module_core, mod->core_size);
63809+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63810+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63811
63812 /* Finally, free the core (containing the module structure) */
63813 unset_module_core_ro_nx(mod);
63814- module_free(mod, mod->module_core);
63815+ module_free_exec(mod, mod->module_core_rx);
63816+ module_free(mod, mod->module_core_rw);
63817
63818 #ifdef CONFIG_MPU
63819 update_protections(current->mm);
63820@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
63821 unsigned int i;
63822 int ret = 0;
63823 const struct kernel_symbol *ksym;
63824+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63825+ int is_fs_load = 0;
63826+ int register_filesystem_found = 0;
63827+ char *p;
63828+
63829+ p = strstr(mod->args, "grsec_modharden_fs");
63830+ if (p) {
63831+ char *endptr = p + strlen("grsec_modharden_fs");
63832+ /* copy \0 as well */
63833+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63834+ is_fs_load = 1;
63835+ }
63836+#endif
63837
63838 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63839 const char *name = info->strtab + sym[i].st_name;
63840
63841+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63842+ /* it's a real shame this will never get ripped and copied
63843+ upstream! ;(
63844+ */
63845+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63846+ register_filesystem_found = 1;
63847+#endif
63848+
63849 switch (sym[i].st_shndx) {
63850 case SHN_COMMON:
63851 /* We compiled with -fno-common. These are not
63852@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
63853 ksym = resolve_symbol_wait(mod, info, name);
63854 /* Ok if resolved. */
63855 if (ksym && !IS_ERR(ksym)) {
63856+ pax_open_kernel();
63857 sym[i].st_value = ksym->value;
63858+ pax_close_kernel();
63859 break;
63860 }
63861
63862@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
63863 secbase = (unsigned long)mod_percpu(mod);
63864 else
63865 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63866+ pax_open_kernel();
63867 sym[i].st_value += secbase;
63868+ pax_close_kernel();
63869 break;
63870 }
63871 }
63872
63873+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63874+ if (is_fs_load && !register_filesystem_found) {
63875+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63876+ ret = -EPERM;
63877+ }
63878+#endif
63879+
63880 return ret;
63881 }
63882
63883@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
63884 || s->sh_entsize != ~0UL
63885 || strstarts(sname, ".init"))
63886 continue;
63887- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63888+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63889+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63890+ else
63891+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63892 DEBUGP("\t%s\n", name);
63893 }
63894- switch (m) {
63895- case 0: /* executable */
63896- mod->core_size = debug_align(mod->core_size);
63897- mod->core_text_size = mod->core_size;
63898- break;
63899- case 1: /* RO: text and ro-data */
63900- mod->core_size = debug_align(mod->core_size);
63901- mod->core_ro_size = mod->core_size;
63902- break;
63903- case 3: /* whole core */
63904- mod->core_size = debug_align(mod->core_size);
63905- break;
63906- }
63907 }
63908
63909 DEBUGP("Init section allocation order:\n");
63910@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
63911 || s->sh_entsize != ~0UL
63912 || !strstarts(sname, ".init"))
63913 continue;
63914- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63915- | INIT_OFFSET_MASK);
63916+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63917+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63918+ else
63919+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63920+ s->sh_entsize |= INIT_OFFSET_MASK;
63921 DEBUGP("\t%s\n", sname);
63922 }
63923- switch (m) {
63924- case 0: /* executable */
63925- mod->init_size = debug_align(mod->init_size);
63926- mod->init_text_size = mod->init_size;
63927- break;
63928- case 1: /* RO: text and ro-data */
63929- mod->init_size = debug_align(mod->init_size);
63930- mod->init_ro_size = mod->init_size;
63931- break;
63932- case 3: /* whole init */
63933- mod->init_size = debug_align(mod->init_size);
63934- break;
63935- }
63936 }
63937 }
63938
63939@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
63940
63941 /* Put symbol section at end of init part of module. */
63942 symsect->sh_flags |= SHF_ALLOC;
63943- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63944+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63945 info->index.sym) | INIT_OFFSET_MASK;
63946 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
63947
63948@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
63949 }
63950
63951 /* Append room for core symbols at end of core part. */
63952- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63953- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
63954+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63955+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
63956
63957 /* Put string table section at end of init part of module. */
63958 strsect->sh_flags |= SHF_ALLOC;
63959- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63960+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63961 info->index.str) | INIT_OFFSET_MASK;
63962 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
63963
63964 /* Append room for core symbols' strings at end of core part. */
63965- info->stroffs = mod->core_size;
63966+ info->stroffs = mod->core_size_rx;
63967 __set_bit(0, info->strmap);
63968- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
63969+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
63970 }
63971
63972 static void add_kallsyms(struct module *mod, const struct load_info *info)
63973@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
63974 /* Make sure we get permanent strtab: don't use info->strtab. */
63975 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
63976
63977+ pax_open_kernel();
63978+
63979 /* Set types up while we still have access to sections. */
63980 for (i = 0; i < mod->num_symtab; i++)
63981 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
63982
63983- mod->core_symtab = dst = mod->module_core + info->symoffs;
63984+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
63985 src = mod->symtab;
63986 *dst = *src;
63987 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63988@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
63989 }
63990 mod->core_num_syms = ndst;
63991
63992- mod->core_strtab = s = mod->module_core + info->stroffs;
63993+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
63994 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
63995 if (test_bit(i, info->strmap))
63996 *++s = mod->strtab[i];
63997+
63998+ pax_close_kernel();
63999 }
64000 #else
64001 static inline void layout_symtab(struct module *mod, struct load_info *info)
64002@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64003 return size == 0 ? NULL : vmalloc_exec(size);
64004 }
64005
64006-static void *module_alloc_update_bounds(unsigned long size)
64007+static void *module_alloc_update_bounds_rw(unsigned long size)
64008 {
64009 void *ret = module_alloc(size);
64010
64011 if (ret) {
64012 mutex_lock(&module_mutex);
64013 /* Update module bounds. */
64014- if ((unsigned long)ret < module_addr_min)
64015- module_addr_min = (unsigned long)ret;
64016- if ((unsigned long)ret + size > module_addr_max)
64017- module_addr_max = (unsigned long)ret + size;
64018+ if ((unsigned long)ret < module_addr_min_rw)
64019+ module_addr_min_rw = (unsigned long)ret;
64020+ if ((unsigned long)ret + size > module_addr_max_rw)
64021+ module_addr_max_rw = (unsigned long)ret + size;
64022+ mutex_unlock(&module_mutex);
64023+ }
64024+ return ret;
64025+}
64026+
64027+static void *module_alloc_update_bounds_rx(unsigned long size)
64028+{
64029+ void *ret = module_alloc_exec(size);
64030+
64031+ if (ret) {
64032+ mutex_lock(&module_mutex);
64033+ /* Update module bounds. */
64034+ if ((unsigned long)ret < module_addr_min_rx)
64035+ module_addr_min_rx = (unsigned long)ret;
64036+ if ((unsigned long)ret + size > module_addr_max_rx)
64037+ module_addr_max_rx = (unsigned long)ret + size;
64038 mutex_unlock(&module_mutex);
64039 }
64040 return ret;
64041@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64042 static int check_modinfo(struct module *mod, struct load_info *info)
64043 {
64044 const char *modmagic = get_modinfo(info, "vermagic");
64045+ const char *license = get_modinfo(info, "license");
64046 int err;
64047
64048+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64049+ if (!license || !license_is_gpl_compatible(license))
64050+ return -ENOEXEC;
64051+#endif
64052+
64053 /* This is allowed: modprobe --force will invalidate it. */
64054 if (!modmagic) {
64055 err = try_to_force_load(mod, "bad vermagic");
64056@@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64057 }
64058
64059 /* Set up license info based on the info section */
64060- set_license(mod, get_modinfo(info, "license"));
64061+ set_license(mod, license);
64062
64063 return 0;
64064 }
64065@@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64066 void *ptr;
64067
64068 /* Do the allocs. */
64069- ptr = module_alloc_update_bounds(mod->core_size);
64070+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64071 /*
64072 * The pointer to this block is stored in the module structure
64073 * which is inside the block. Just mark it as not being a
64074@@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64075 if (!ptr)
64076 return -ENOMEM;
64077
64078- memset(ptr, 0, mod->core_size);
64079- mod->module_core = ptr;
64080+ memset(ptr, 0, mod->core_size_rw);
64081+ mod->module_core_rw = ptr;
64082
64083- ptr = module_alloc_update_bounds(mod->init_size);
64084+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64085 /*
64086 * The pointer to this block is stored in the module structure
64087 * which is inside the block. This block doesn't need to be
64088 * scanned as it contains data and code that will be freed
64089 * after the module is initialized.
64090 */
64091- kmemleak_ignore(ptr);
64092- if (!ptr && mod->init_size) {
64093- module_free(mod, mod->module_core);
64094+ kmemleak_not_leak(ptr);
64095+ if (!ptr && mod->init_size_rw) {
64096+ module_free(mod, mod->module_core_rw);
64097 return -ENOMEM;
64098 }
64099- memset(ptr, 0, mod->init_size);
64100- mod->module_init = ptr;
64101+ memset(ptr, 0, mod->init_size_rw);
64102+ mod->module_init_rw = ptr;
64103+
64104+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64105+ kmemleak_not_leak(ptr);
64106+ if (!ptr) {
64107+ module_free(mod, mod->module_init_rw);
64108+ module_free(mod, mod->module_core_rw);
64109+ return -ENOMEM;
64110+ }
64111+
64112+ pax_open_kernel();
64113+ memset(ptr, 0, mod->core_size_rx);
64114+ pax_close_kernel();
64115+ mod->module_core_rx = ptr;
64116+
64117+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64118+ kmemleak_not_leak(ptr);
64119+ if (!ptr && mod->init_size_rx) {
64120+ module_free_exec(mod, mod->module_core_rx);
64121+ module_free(mod, mod->module_init_rw);
64122+ module_free(mod, mod->module_core_rw);
64123+ return -ENOMEM;
64124+ }
64125+
64126+ pax_open_kernel();
64127+ memset(ptr, 0, mod->init_size_rx);
64128+ pax_close_kernel();
64129+ mod->module_init_rx = ptr;
64130
64131 /* Transfer each section which specifies SHF_ALLOC */
64132 DEBUGP("final section addresses:\n");
64133@@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64134 if (!(shdr->sh_flags & SHF_ALLOC))
64135 continue;
64136
64137- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64138- dest = mod->module_init
64139- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64140- else
64141- dest = mod->module_core + shdr->sh_entsize;
64142+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64143+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64144+ dest = mod->module_init_rw
64145+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64146+ else
64147+ dest = mod->module_init_rx
64148+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64149+ } else {
64150+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64151+ dest = mod->module_core_rw + shdr->sh_entsize;
64152+ else
64153+ dest = mod->module_core_rx + shdr->sh_entsize;
64154+ }
64155+
64156+ if (shdr->sh_type != SHT_NOBITS) {
64157+
64158+#ifdef CONFIG_PAX_KERNEXEC
64159+#ifdef CONFIG_X86_64
64160+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64161+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64162+#endif
64163+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64164+ pax_open_kernel();
64165+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64166+ pax_close_kernel();
64167+ } else
64168+#endif
64169
64170- if (shdr->sh_type != SHT_NOBITS)
64171 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64172+ }
64173 /* Update sh_addr to point to copy in image. */
64174- shdr->sh_addr = (unsigned long)dest;
64175+
64176+#ifdef CONFIG_PAX_KERNEXEC
64177+ if (shdr->sh_flags & SHF_EXECINSTR)
64178+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64179+ else
64180+#endif
64181+
64182+ shdr->sh_addr = (unsigned long)dest;
64183 DEBUGP("\t0x%lx %s\n",
64184 shdr->sh_addr, info->secstrings + shdr->sh_name);
64185 }
64186@@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64187 * Do it before processing of module parameters, so the module
64188 * can provide parameter accessor functions of its own.
64189 */
64190- if (mod->module_init)
64191- flush_icache_range((unsigned long)mod->module_init,
64192- (unsigned long)mod->module_init
64193- + mod->init_size);
64194- flush_icache_range((unsigned long)mod->module_core,
64195- (unsigned long)mod->module_core + mod->core_size);
64196+ if (mod->module_init_rx)
64197+ flush_icache_range((unsigned long)mod->module_init_rx,
64198+ (unsigned long)mod->module_init_rx
64199+ + mod->init_size_rx);
64200+ flush_icache_range((unsigned long)mod->module_core_rx,
64201+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64202
64203 set_fs(old_fs);
64204 }
64205@@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64206 {
64207 kfree(info->strmap);
64208 percpu_modfree(mod);
64209- module_free(mod, mod->module_init);
64210- module_free(mod, mod->module_core);
64211+ module_free_exec(mod, mod->module_init_rx);
64212+ module_free_exec(mod, mod->module_core_rx);
64213+ module_free(mod, mod->module_init_rw);
64214+ module_free(mod, mod->module_core_rw);
64215 }
64216
64217 int __weak module_finalize(const Elf_Ehdr *hdr,
64218@@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64219 if (err)
64220 goto free_unload;
64221
64222+ /* Now copy in args */
64223+ mod->args = strndup_user(uargs, ~0UL >> 1);
64224+ if (IS_ERR(mod->args)) {
64225+ err = PTR_ERR(mod->args);
64226+ goto free_unload;
64227+ }
64228+
64229 /* Set up MODINFO_ATTR fields */
64230 setup_modinfo(mod, &info);
64231
64232+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64233+ {
64234+ char *p, *p2;
64235+
64236+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64237+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64238+ err = -EPERM;
64239+ goto free_modinfo;
64240+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64241+ p += strlen("grsec_modharden_normal");
64242+ p2 = strstr(p, "_");
64243+ if (p2) {
64244+ *p2 = '\0';
64245+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64246+ *p2 = '_';
64247+ }
64248+ err = -EPERM;
64249+ goto free_modinfo;
64250+ }
64251+ }
64252+#endif
64253+
64254 /* Fix up syms, so that st_value is a pointer to location. */
64255 err = simplify_symbols(mod, &info);
64256 if (err < 0)
64257@@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64258
64259 flush_module_icache(mod);
64260
64261- /* Now copy in args */
64262- mod->args = strndup_user(uargs, ~0UL >> 1);
64263- if (IS_ERR(mod->args)) {
64264- err = PTR_ERR(mod->args);
64265- goto free_arch_cleanup;
64266- }
64267-
64268 /* Mark state as coming so strong_try_module_get() ignores us. */
64269 mod->state = MODULE_STATE_COMING;
64270
64271@@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64272 unlock:
64273 mutex_unlock(&module_mutex);
64274 synchronize_sched();
64275- kfree(mod->args);
64276- free_arch_cleanup:
64277 module_arch_cleanup(mod);
64278 free_modinfo:
64279 free_modinfo(mod);
64280+ kfree(mod->args);
64281 free_unload:
64282 module_unload_free(mod);
64283 free_module:
64284@@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64285 MODULE_STATE_COMING, mod);
64286
64287 /* Set RO and NX regions for core */
64288- set_section_ro_nx(mod->module_core,
64289- mod->core_text_size,
64290- mod->core_ro_size,
64291- mod->core_size);
64292+ set_section_ro_nx(mod->module_core_rx,
64293+ mod->core_size_rx,
64294+ mod->core_size_rx,
64295+ mod->core_size_rx);
64296
64297 /* Set RO and NX regions for init */
64298- set_section_ro_nx(mod->module_init,
64299- mod->init_text_size,
64300- mod->init_ro_size,
64301- mod->init_size);
64302+ set_section_ro_nx(mod->module_init_rx,
64303+ mod->init_size_rx,
64304+ mod->init_size_rx,
64305+ mod->init_size_rx);
64306
64307 do_mod_ctors(mod);
64308 /* Start the module */
64309@@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64310 mod->strtab = mod->core_strtab;
64311 #endif
64312 unset_module_init_ro_nx(mod);
64313- module_free(mod, mod->module_init);
64314- mod->module_init = NULL;
64315- mod->init_size = 0;
64316- mod->init_ro_size = 0;
64317- mod->init_text_size = 0;
64318+ module_free(mod, mod->module_init_rw);
64319+ module_free_exec(mod, mod->module_init_rx);
64320+ mod->module_init_rw = NULL;
64321+ mod->module_init_rx = NULL;
64322+ mod->init_size_rw = 0;
64323+ mod->init_size_rx = 0;
64324 mutex_unlock(&module_mutex);
64325
64326 return 0;
64327@@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64328 unsigned long nextval;
64329
64330 /* At worse, next value is at end of module */
64331- if (within_module_init(addr, mod))
64332- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64333+ if (within_module_init_rx(addr, mod))
64334+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64335+ else if (within_module_init_rw(addr, mod))
64336+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64337+ else if (within_module_core_rx(addr, mod))
64338+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64339+ else if (within_module_core_rw(addr, mod))
64340+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64341 else
64342- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64343+ return NULL;
64344
64345 /* Scan for closest preceding symbol, and next symbol. (ELF
64346 starts real symbols at 1). */
64347@@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64348 char buf[8];
64349
64350 seq_printf(m, "%s %u",
64351- mod->name, mod->init_size + mod->core_size);
64352+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64353 print_unload_info(m, mod);
64354
64355 /* Informative for users. */
64356@@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64357 mod->state == MODULE_STATE_COMING ? "Loading":
64358 "Live");
64359 /* Used by oprofile and other similar tools. */
64360- seq_printf(m, " 0x%pK", mod->module_core);
64361+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64362
64363 /* Taints info */
64364 if (mod->taints)
64365@@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64366
64367 static int __init proc_modules_init(void)
64368 {
64369+#ifndef CONFIG_GRKERNSEC_HIDESYM
64370+#ifdef CONFIG_GRKERNSEC_PROC_USER
64371+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64372+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64373+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64374+#else
64375 proc_create("modules", 0, NULL, &proc_modules_operations);
64376+#endif
64377+#else
64378+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64379+#endif
64380 return 0;
64381 }
64382 module_init(proc_modules_init);
64383@@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64384 {
64385 struct module *mod;
64386
64387- if (addr < module_addr_min || addr > module_addr_max)
64388+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64389+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64390 return NULL;
64391
64392 list_for_each_entry_rcu(mod, &modules, list)
64393- if (within_module_core(addr, mod)
64394- || within_module_init(addr, mod))
64395+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64396 return mod;
64397 return NULL;
64398 }
64399@@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64400 */
64401 struct module *__module_text_address(unsigned long addr)
64402 {
64403- struct module *mod = __module_address(addr);
64404+ struct module *mod;
64405+
64406+#ifdef CONFIG_X86_32
64407+ addr = ktla_ktva(addr);
64408+#endif
64409+
64410+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64411+ return NULL;
64412+
64413+ mod = __module_address(addr);
64414+
64415 if (mod) {
64416 /* Make sure it's within the text section. */
64417- if (!within(addr, mod->module_init, mod->init_text_size)
64418- && !within(addr, mod->module_core, mod->core_text_size))
64419+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64420 mod = NULL;
64421 }
64422 return mod;
64423diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64424index 7e3443f..b2a1e6b 100644
64425--- a/kernel/mutex-debug.c
64426+++ b/kernel/mutex-debug.c
64427@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64428 }
64429
64430 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64431- struct thread_info *ti)
64432+ struct task_struct *task)
64433 {
64434 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64435
64436 /* Mark the current thread as blocked on the lock: */
64437- ti->task->blocked_on = waiter;
64438+ task->blocked_on = waiter;
64439 }
64440
64441 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64442- struct thread_info *ti)
64443+ struct task_struct *task)
64444 {
64445 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64446- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64447- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64448- ti->task->blocked_on = NULL;
64449+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64450+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64451+ task->blocked_on = NULL;
64452
64453 list_del_init(&waiter->list);
64454 waiter->task = NULL;
64455diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64456index 0799fd3..d06ae3b 100644
64457--- a/kernel/mutex-debug.h
64458+++ b/kernel/mutex-debug.h
64459@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64460 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64461 extern void debug_mutex_add_waiter(struct mutex *lock,
64462 struct mutex_waiter *waiter,
64463- struct thread_info *ti);
64464+ struct task_struct *task);
64465 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64466- struct thread_info *ti);
64467+ struct task_struct *task);
64468 extern void debug_mutex_unlock(struct mutex *lock);
64469 extern void debug_mutex_init(struct mutex *lock, const char *name,
64470 struct lock_class_key *key);
64471diff --git a/kernel/mutex.c b/kernel/mutex.c
64472index 89096dd..f91ebc5 100644
64473--- a/kernel/mutex.c
64474+++ b/kernel/mutex.c
64475@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64476 spin_lock_mutex(&lock->wait_lock, flags);
64477
64478 debug_mutex_lock_common(lock, &waiter);
64479- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64480+ debug_mutex_add_waiter(lock, &waiter, task);
64481
64482 /* add waiting tasks to the end of the waitqueue (FIFO): */
64483 list_add_tail(&waiter.list, &lock->wait_list);
64484@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64485 * TASK_UNINTERRUPTIBLE case.)
64486 */
64487 if (unlikely(signal_pending_state(state, task))) {
64488- mutex_remove_waiter(lock, &waiter,
64489- task_thread_info(task));
64490+ mutex_remove_waiter(lock, &waiter, task);
64491 mutex_release(&lock->dep_map, 1, ip);
64492 spin_unlock_mutex(&lock->wait_lock, flags);
64493
64494@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64495 done:
64496 lock_acquired(&lock->dep_map, ip);
64497 /* got the lock - rejoice! */
64498- mutex_remove_waiter(lock, &waiter, current_thread_info());
64499+ mutex_remove_waiter(lock, &waiter, task);
64500 mutex_set_owner(lock);
64501
64502 /* set it to 0 if there are no waiters left: */
64503diff --git a/kernel/padata.c b/kernel/padata.c
64504index b452599..5d68f4e 100644
64505--- a/kernel/padata.c
64506+++ b/kernel/padata.c
64507@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64508 padata->pd = pd;
64509 padata->cb_cpu = cb_cpu;
64510
64511- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64512- atomic_set(&pd->seq_nr, -1);
64513+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64514+ atomic_set_unchecked(&pd->seq_nr, -1);
64515
64516- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64517+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64518
64519 target_cpu = padata_cpu_hash(padata);
64520 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64521@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
64522 padata_init_pqueues(pd);
64523 padata_init_squeues(pd);
64524 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64525- atomic_set(&pd->seq_nr, -1);
64526+ atomic_set_unchecked(&pd->seq_nr, -1);
64527 atomic_set(&pd->reorder_objects, 0);
64528 atomic_set(&pd->refcnt, 0);
64529 pd->pinst = pinst;
64530diff --git a/kernel/panic.c b/kernel/panic.c
64531index b2659360..5972a0f 100644
64532--- a/kernel/panic.c
64533+++ b/kernel/panic.c
64534@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
64535 va_end(args);
64536 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
64537 #ifdef CONFIG_DEBUG_BUGVERBOSE
64538- dump_stack();
64539+ /*
64540+ * Avoid nested stack-dumping if a panic occurs during oops processing
64541+ */
64542+ if (!oops_in_progress)
64543+ dump_stack();
64544 #endif
64545
64546 /*
64547@@ -373,7 +377,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
64548 const char *board;
64549
64550 printk(KERN_WARNING "------------[ cut here ]------------\n");
64551- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64552+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64553 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64554 if (board)
64555 printk(KERN_WARNING "Hardware name: %s\n", board);
64556@@ -428,7 +432,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64557 */
64558 void __stack_chk_fail(void)
64559 {
64560- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64561+ dump_stack();
64562+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64563 __builtin_return_address(0));
64564 }
64565 EXPORT_SYMBOL(__stack_chk_fail);
64566diff --git a/kernel/pid.c b/kernel/pid.c
64567index fa5f722..0c93e57 100644
64568--- a/kernel/pid.c
64569+++ b/kernel/pid.c
64570@@ -33,6 +33,7 @@
64571 #include <linux/rculist.h>
64572 #include <linux/bootmem.h>
64573 #include <linux/hash.h>
64574+#include <linux/security.h>
64575 #include <linux/pid_namespace.h>
64576 #include <linux/init_task.h>
64577 #include <linux/syscalls.h>
64578@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
64579
64580 int pid_max = PID_MAX_DEFAULT;
64581
64582-#define RESERVED_PIDS 300
64583+#define RESERVED_PIDS 500
64584
64585 int pid_max_min = RESERVED_PIDS + 1;
64586 int pid_max_max = PID_MAX_LIMIT;
64587@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
64588 */
64589 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64590 {
64591+ struct task_struct *task;
64592+
64593 rcu_lockdep_assert(rcu_read_lock_held(),
64594 "find_task_by_pid_ns() needs rcu_read_lock()"
64595 " protection");
64596- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64597+
64598+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64599+
64600+ if (gr_pid_is_chrooted(task))
64601+ return NULL;
64602+
64603+ return task;
64604 }
64605
64606 struct task_struct *find_task_by_vpid(pid_t vnr)
64607@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
64608 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64609 }
64610
64611+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64612+{
64613+ rcu_lockdep_assert(rcu_read_lock_held(),
64614+ "find_task_by_pid_ns() needs rcu_read_lock()"
64615+ " protection");
64616+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64617+}
64618+
64619 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64620 {
64621 struct pid *pid;
64622diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
64623index e7cb76d..75eceb3 100644
64624--- a/kernel/posix-cpu-timers.c
64625+++ b/kernel/posix-cpu-timers.c
64626@@ -6,6 +6,7 @@
64627 #include <linux/posix-timers.h>
64628 #include <linux/errno.h>
64629 #include <linux/math64.h>
64630+#include <linux/security.h>
64631 #include <asm/uaccess.h>
64632 #include <linux/kernel_stat.h>
64633 #include <trace/events/timer.h>
64634@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64635
64636 static __init int init_posix_cpu_timers(void)
64637 {
64638- struct k_clock process = {
64639+ static struct k_clock process = {
64640 .clock_getres = process_cpu_clock_getres,
64641 .clock_get = process_cpu_clock_get,
64642 .timer_create = process_cpu_timer_create,
64643 .nsleep = process_cpu_nsleep,
64644 .nsleep_restart = process_cpu_nsleep_restart,
64645 };
64646- struct k_clock thread = {
64647+ static struct k_clock thread = {
64648 .clock_getres = thread_cpu_clock_getres,
64649 .clock_get = thread_cpu_clock_get,
64650 .timer_create = thread_cpu_timer_create,
64651diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
64652index 69185ae..cc2847a 100644
64653--- a/kernel/posix-timers.c
64654+++ b/kernel/posix-timers.c
64655@@ -43,6 +43,7 @@
64656 #include <linux/idr.h>
64657 #include <linux/posix-clock.h>
64658 #include <linux/posix-timers.h>
64659+#include <linux/grsecurity.h>
64660 #include <linux/syscalls.h>
64661 #include <linux/wait.h>
64662 #include <linux/workqueue.h>
64663@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64664 * which we beg off on and pass to do_sys_settimeofday().
64665 */
64666
64667-static struct k_clock posix_clocks[MAX_CLOCKS];
64668+static struct k_clock *posix_clocks[MAX_CLOCKS];
64669
64670 /*
64671 * These ones are defined below.
64672@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
64673 */
64674 static __init int init_posix_timers(void)
64675 {
64676- struct k_clock clock_realtime = {
64677+ static struct k_clock clock_realtime = {
64678 .clock_getres = hrtimer_get_res,
64679 .clock_get = posix_clock_realtime_get,
64680 .clock_set = posix_clock_realtime_set,
64681@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
64682 .timer_get = common_timer_get,
64683 .timer_del = common_timer_del,
64684 };
64685- struct k_clock clock_monotonic = {
64686+ static struct k_clock clock_monotonic = {
64687 .clock_getres = hrtimer_get_res,
64688 .clock_get = posix_ktime_get_ts,
64689 .nsleep = common_nsleep,
64690@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
64691 .timer_get = common_timer_get,
64692 .timer_del = common_timer_del,
64693 };
64694- struct k_clock clock_monotonic_raw = {
64695+ static struct k_clock clock_monotonic_raw = {
64696 .clock_getres = hrtimer_get_res,
64697 .clock_get = posix_get_monotonic_raw,
64698 };
64699- struct k_clock clock_realtime_coarse = {
64700+ static struct k_clock clock_realtime_coarse = {
64701 .clock_getres = posix_get_coarse_res,
64702 .clock_get = posix_get_realtime_coarse,
64703 };
64704- struct k_clock clock_monotonic_coarse = {
64705+ static struct k_clock clock_monotonic_coarse = {
64706 .clock_getres = posix_get_coarse_res,
64707 .clock_get = posix_get_monotonic_coarse,
64708 };
64709- struct k_clock clock_boottime = {
64710+ static struct k_clock clock_boottime = {
64711 .clock_getres = hrtimer_get_res,
64712 .clock_get = posix_get_boottime,
64713 .nsleep = common_nsleep,
64714@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
64715 return;
64716 }
64717
64718- posix_clocks[clock_id] = *new_clock;
64719+ posix_clocks[clock_id] = new_clock;
64720 }
64721 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64722
64723@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
64724 return (id & CLOCKFD_MASK) == CLOCKFD ?
64725 &clock_posix_dynamic : &clock_posix_cpu;
64726
64727- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64728+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64729 return NULL;
64730- return &posix_clocks[id];
64731+ return posix_clocks[id];
64732 }
64733
64734 static int common_timer_create(struct k_itimer *new_timer)
64735@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
64736 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64737 return -EFAULT;
64738
64739+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64740+ have their clock_set fptr set to a nosettime dummy function
64741+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64742+ call common_clock_set, which calls do_sys_settimeofday, which
64743+ we hook
64744+ */
64745+
64746 return kc->clock_set(which_clock, &new_tp);
64747 }
64748
64749diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
64750index d523593..68197a4 100644
64751--- a/kernel/power/poweroff.c
64752+++ b/kernel/power/poweroff.c
64753@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
64754 .enable_mask = SYSRQ_ENABLE_BOOT,
64755 };
64756
64757-static int pm_sysrq_init(void)
64758+static int __init pm_sysrq_init(void)
64759 {
64760 register_sysrq_key('o', &sysrq_poweroff_op);
64761 return 0;
64762diff --git a/kernel/power/process.c b/kernel/power/process.c
64763index addbbe5..f9e32e0 100644
64764--- a/kernel/power/process.c
64765+++ b/kernel/power/process.c
64766@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
64767 u64 elapsed_csecs64;
64768 unsigned int elapsed_csecs;
64769 bool wakeup = false;
64770+ bool timedout = false;
64771
64772 do_gettimeofday(&start);
64773
64774@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
64775
64776 while (true) {
64777 todo = 0;
64778+ if (time_after(jiffies, end_time))
64779+ timedout = true;
64780 read_lock(&tasklist_lock);
64781 do_each_thread(g, p) {
64782 if (frozen(p) || !freezable(p))
64783@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
64784 * try_to_stop() after schedule() in ptrace/signal
64785 * stop sees TIF_FREEZE.
64786 */
64787- if (!task_is_stopped_or_traced(p) &&
64788- !freezer_should_skip(p))
64789+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64790 todo++;
64791+ if (timedout) {
64792+ printk(KERN_ERR "Task refusing to freeze:\n");
64793+ sched_show_task(p);
64794+ }
64795+ }
64796 } while_each_thread(g, p);
64797 read_unlock(&tasklist_lock);
64798
64799@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
64800 todo += wq_busy;
64801 }
64802
64803- if (!todo || time_after(jiffies, end_time))
64804+ if (!todo || timedout)
64805 break;
64806
64807 if (pm_wakeup_pending()) {
64808diff --git a/kernel/printk.c b/kernel/printk.c
64809index 7982a0a..2095fdc 100644
64810--- a/kernel/printk.c
64811+++ b/kernel/printk.c
64812@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
64813 if (from_file && type != SYSLOG_ACTION_OPEN)
64814 return 0;
64815
64816+#ifdef CONFIG_GRKERNSEC_DMESG
64817+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64818+ return -EPERM;
64819+#endif
64820+
64821 if (syslog_action_restricted(type)) {
64822 if (capable(CAP_SYSLOG))
64823 return 0;
64824diff --git a/kernel/profile.c b/kernel/profile.c
64825index 76b8e77..a2930e8 100644
64826--- a/kernel/profile.c
64827+++ b/kernel/profile.c
64828@@ -39,7 +39,7 @@ struct profile_hit {
64829 /* Oprofile timer tick hook */
64830 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64831
64832-static atomic_t *prof_buffer;
64833+static atomic_unchecked_t *prof_buffer;
64834 static unsigned long prof_len, prof_shift;
64835
64836 int prof_on __read_mostly;
64837@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64838 hits[i].pc = 0;
64839 continue;
64840 }
64841- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64842+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64843 hits[i].hits = hits[i].pc = 0;
64844 }
64845 }
64846@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
64847 * Add the current hit(s) and flush the write-queue out
64848 * to the global buffer:
64849 */
64850- atomic_add(nr_hits, &prof_buffer[pc]);
64851+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64852 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64853- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64854+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64855 hits[i].pc = hits[i].hits = 0;
64856 }
64857 out:
64858@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
64859 {
64860 unsigned long pc;
64861 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64862- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64863+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64864 }
64865 #endif /* !CONFIG_SMP */
64866
64867@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
64868 return -EFAULT;
64869 buf++; p++; count--; read++;
64870 }
64871- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64872+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64873 if (copy_to_user(buf, (void *)pnt, count))
64874 return -EFAULT;
64875 read += count;
64876@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
64877 }
64878 #endif
64879 profile_discard_flip_buffers();
64880- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64881+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64882 return count;
64883 }
64884
64885diff --git a/kernel/ptrace.c b/kernel/ptrace.c
64886index 78ab24a..332c915 100644
64887--- a/kernel/ptrace.c
64888+++ b/kernel/ptrace.c
64889@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
64890 return ret;
64891 }
64892
64893-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64894+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64895+ unsigned int log)
64896 {
64897 const struct cred *cred = current_cred(), *tcred;
64898
64899@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64900 cred->gid == tcred->sgid &&
64901 cred->gid == tcred->gid))
64902 goto ok;
64903- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64904+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64905+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64906 goto ok;
64907 rcu_read_unlock();
64908 return -EPERM;
64909@@ -207,7 +209,9 @@ ok:
64910 smp_rmb();
64911 if (task->mm)
64912 dumpable = get_dumpable(task->mm);
64913- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64914+ if (!dumpable &&
64915+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64916+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64917 return -EPERM;
64918
64919 return security_ptrace_access_check(task, mode);
64920@@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
64921 {
64922 int err;
64923 task_lock(task);
64924- err = __ptrace_may_access(task, mode);
64925+ err = __ptrace_may_access(task, mode, 0);
64926+ task_unlock(task);
64927+ return !err;
64928+}
64929+
64930+bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
64931+{
64932+ return __ptrace_may_access(task, mode, 0);
64933+}
64934+
64935+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64936+{
64937+ int err;
64938+ task_lock(task);
64939+ err = __ptrace_may_access(task, mode, 1);
64940 task_unlock(task);
64941 return !err;
64942 }
64943@@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
64944 goto out;
64945
64946 task_lock(task);
64947- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64948+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64949 task_unlock(task);
64950 if (retval)
64951 goto unlock_creds;
64952@@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
64953 task->ptrace = PT_PTRACED;
64954 if (seize)
64955 task->ptrace |= PT_SEIZED;
64956- if (task_ns_capable(task, CAP_SYS_PTRACE))
64957+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
64958 task->ptrace |= PT_PTRACE_CAP;
64959
64960 __ptrace_link(task, current);
64961@@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
64962 break;
64963 return -EIO;
64964 }
64965- if (copy_to_user(dst, buf, retval))
64966+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
64967 return -EFAULT;
64968 copied += retval;
64969 src += retval;
64970@@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
64971 bool seized = child->ptrace & PT_SEIZED;
64972 int ret = -EIO;
64973 siginfo_t siginfo, *si;
64974- void __user *datavp = (void __user *) data;
64975+ void __user *datavp = (__force void __user *) data;
64976 unsigned long __user *datalp = datavp;
64977 unsigned long flags;
64978
64979@@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
64980 goto out;
64981 }
64982
64983+ if (gr_handle_ptrace(child, request)) {
64984+ ret = -EPERM;
64985+ goto out_put_task_struct;
64986+ }
64987+
64988 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
64989 ret = ptrace_attach(child, request, data);
64990 /*
64991 * Some architectures need to do book-keeping after
64992 * a ptrace attach.
64993 */
64994- if (!ret)
64995+ if (!ret) {
64996 arch_ptrace_attach(child);
64997+ gr_audit_ptrace(child);
64998+ }
64999 goto out_put_task_struct;
65000 }
65001
65002@@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65003 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65004 if (copied != sizeof(tmp))
65005 return -EIO;
65006- return put_user(tmp, (unsigned long __user *)data);
65007+ return put_user(tmp, (__force unsigned long __user *)data);
65008 }
65009
65010 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65011@@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65012 goto out;
65013 }
65014
65015+ if (gr_handle_ptrace(child, request)) {
65016+ ret = -EPERM;
65017+ goto out_put_task_struct;
65018+ }
65019+
65020 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65021 ret = ptrace_attach(child, request, data);
65022 /*
65023 * Some architectures need to do book-keeping after
65024 * a ptrace attach.
65025 */
65026- if (!ret)
65027+ if (!ret) {
65028 arch_ptrace_attach(child);
65029+ gr_audit_ptrace(child);
65030+ }
65031 goto out_put_task_struct;
65032 }
65033
65034diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65035index 764825c..3aa6ac4 100644
65036--- a/kernel/rcutorture.c
65037+++ b/kernel/rcutorture.c
65038@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65039 { 0 };
65040 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65041 { 0 };
65042-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65043-static atomic_t n_rcu_torture_alloc;
65044-static atomic_t n_rcu_torture_alloc_fail;
65045-static atomic_t n_rcu_torture_free;
65046-static atomic_t n_rcu_torture_mberror;
65047-static atomic_t n_rcu_torture_error;
65048+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65049+static atomic_unchecked_t n_rcu_torture_alloc;
65050+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65051+static atomic_unchecked_t n_rcu_torture_free;
65052+static atomic_unchecked_t n_rcu_torture_mberror;
65053+static atomic_unchecked_t n_rcu_torture_error;
65054 static long n_rcu_torture_boost_ktrerror;
65055 static long n_rcu_torture_boost_rterror;
65056 static long n_rcu_torture_boost_failure;
65057@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65058
65059 spin_lock_bh(&rcu_torture_lock);
65060 if (list_empty(&rcu_torture_freelist)) {
65061- atomic_inc(&n_rcu_torture_alloc_fail);
65062+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65063 spin_unlock_bh(&rcu_torture_lock);
65064 return NULL;
65065 }
65066- atomic_inc(&n_rcu_torture_alloc);
65067+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65068 p = rcu_torture_freelist.next;
65069 list_del_init(p);
65070 spin_unlock_bh(&rcu_torture_lock);
65071@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65072 static void
65073 rcu_torture_free(struct rcu_torture *p)
65074 {
65075- atomic_inc(&n_rcu_torture_free);
65076+ atomic_inc_unchecked(&n_rcu_torture_free);
65077 spin_lock_bh(&rcu_torture_lock);
65078 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65079 spin_unlock_bh(&rcu_torture_lock);
65080@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65081 i = rp->rtort_pipe_count;
65082 if (i > RCU_TORTURE_PIPE_LEN)
65083 i = RCU_TORTURE_PIPE_LEN;
65084- atomic_inc(&rcu_torture_wcount[i]);
65085+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65086 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65087 rp->rtort_mbtest = 0;
65088 rcu_torture_free(rp);
65089@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65090 i = rp->rtort_pipe_count;
65091 if (i > RCU_TORTURE_PIPE_LEN)
65092 i = RCU_TORTURE_PIPE_LEN;
65093- atomic_inc(&rcu_torture_wcount[i]);
65094+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65095 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65096 rp->rtort_mbtest = 0;
65097 list_del(&rp->rtort_free);
65098@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65099 i = old_rp->rtort_pipe_count;
65100 if (i > RCU_TORTURE_PIPE_LEN)
65101 i = RCU_TORTURE_PIPE_LEN;
65102- atomic_inc(&rcu_torture_wcount[i]);
65103+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65104 old_rp->rtort_pipe_count++;
65105 cur_ops->deferred_free(old_rp);
65106 }
65107@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65108 return;
65109 }
65110 if (p->rtort_mbtest == 0)
65111- atomic_inc(&n_rcu_torture_mberror);
65112+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65113 spin_lock(&rand_lock);
65114 cur_ops->read_delay(&rand);
65115 n_rcu_torture_timers++;
65116@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65117 continue;
65118 }
65119 if (p->rtort_mbtest == 0)
65120- atomic_inc(&n_rcu_torture_mberror);
65121+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65122 cur_ops->read_delay(&rand);
65123 preempt_disable();
65124 pipe_count = p->rtort_pipe_count;
65125@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65126 rcu_torture_current,
65127 rcu_torture_current_version,
65128 list_empty(&rcu_torture_freelist),
65129- atomic_read(&n_rcu_torture_alloc),
65130- atomic_read(&n_rcu_torture_alloc_fail),
65131- atomic_read(&n_rcu_torture_free),
65132- atomic_read(&n_rcu_torture_mberror),
65133+ atomic_read_unchecked(&n_rcu_torture_alloc),
65134+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65135+ atomic_read_unchecked(&n_rcu_torture_free),
65136+ atomic_read_unchecked(&n_rcu_torture_mberror),
65137 n_rcu_torture_boost_ktrerror,
65138 n_rcu_torture_boost_rterror,
65139 n_rcu_torture_boost_failure,
65140 n_rcu_torture_boosts,
65141 n_rcu_torture_timers);
65142- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65143+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65144 n_rcu_torture_boost_ktrerror != 0 ||
65145 n_rcu_torture_boost_rterror != 0 ||
65146 n_rcu_torture_boost_failure != 0)
65147@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65148 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65149 if (i > 1) {
65150 cnt += sprintf(&page[cnt], "!!! ");
65151- atomic_inc(&n_rcu_torture_error);
65152+ atomic_inc_unchecked(&n_rcu_torture_error);
65153 WARN_ON_ONCE(1);
65154 }
65155 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65156@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65157 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65158 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65159 cnt += sprintf(&page[cnt], " %d",
65160- atomic_read(&rcu_torture_wcount[i]));
65161+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65162 }
65163 cnt += sprintf(&page[cnt], "\n");
65164 if (cur_ops->stats)
65165@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65166
65167 if (cur_ops->cleanup)
65168 cur_ops->cleanup();
65169- if (atomic_read(&n_rcu_torture_error))
65170+ if (atomic_read_unchecked(&n_rcu_torture_error))
65171 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65172 else
65173 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65174@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65175
65176 rcu_torture_current = NULL;
65177 rcu_torture_current_version = 0;
65178- atomic_set(&n_rcu_torture_alloc, 0);
65179- atomic_set(&n_rcu_torture_alloc_fail, 0);
65180- atomic_set(&n_rcu_torture_free, 0);
65181- atomic_set(&n_rcu_torture_mberror, 0);
65182- atomic_set(&n_rcu_torture_error, 0);
65183+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65184+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65185+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65186+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65187+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65188 n_rcu_torture_boost_ktrerror = 0;
65189 n_rcu_torture_boost_rterror = 0;
65190 n_rcu_torture_boost_failure = 0;
65191 n_rcu_torture_boosts = 0;
65192 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65193- atomic_set(&rcu_torture_wcount[i], 0);
65194+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65195 for_each_possible_cpu(cpu) {
65196 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65197 per_cpu(rcu_torture_count, cpu)[i] = 0;
65198diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65199index 6b76d81..7afc1b3 100644
65200--- a/kernel/rcutree.c
65201+++ b/kernel/rcutree.c
65202@@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65203 trace_rcu_dyntick("Start");
65204 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65205 smp_mb__before_atomic_inc(); /* See above. */
65206- atomic_inc(&rdtp->dynticks);
65207+ atomic_inc_unchecked(&rdtp->dynticks);
65208 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65209- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65210+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65211 local_irq_restore(flags);
65212 }
65213
65214@@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65215 return;
65216 }
65217 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65218- atomic_inc(&rdtp->dynticks);
65219+ atomic_inc_unchecked(&rdtp->dynticks);
65220 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65221 smp_mb__after_atomic_inc(); /* See above. */
65222- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65223+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65224 trace_rcu_dyntick("End");
65225 local_irq_restore(flags);
65226 }
65227@@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65228 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65229
65230 if (rdtp->dynticks_nmi_nesting == 0 &&
65231- (atomic_read(&rdtp->dynticks) & 0x1))
65232+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65233 return;
65234 rdtp->dynticks_nmi_nesting++;
65235 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65236- atomic_inc(&rdtp->dynticks);
65237+ atomic_inc_unchecked(&rdtp->dynticks);
65238 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65239 smp_mb__after_atomic_inc(); /* See above. */
65240- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65241+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65242 }
65243
65244 /**
65245@@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65246 return;
65247 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65248 smp_mb__before_atomic_inc(); /* See above. */
65249- atomic_inc(&rdtp->dynticks);
65250+ atomic_inc_unchecked(&rdtp->dynticks);
65251 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65252- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65253+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65254 }
65255
65256 /**
65257@@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65258 */
65259 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65260 {
65261- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65262+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65263 return 0;
65264 }
65265
65266@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65267 unsigned int curr;
65268 unsigned int snap;
65269
65270- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65271+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65272 snap = (unsigned int)rdp->dynticks_snap;
65273
65274 /*
65275@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65276 /*
65277 * Do RCU core processing for the current CPU.
65278 */
65279-static void rcu_process_callbacks(struct softirq_action *unused)
65280+static void rcu_process_callbacks(void)
65281 {
65282 trace_rcu_utilization("Start RCU core");
65283 __rcu_process_callbacks(&rcu_sched_state,
65284diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65285index 849ce9e..74bc9de 100644
65286--- a/kernel/rcutree.h
65287+++ b/kernel/rcutree.h
65288@@ -86,7 +86,7 @@
65289 struct rcu_dynticks {
65290 int dynticks_nesting; /* Track irq/process nesting level. */
65291 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65292- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65293+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65294 };
65295
65296 /* RCU's kthread states for tracing. */
65297diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65298index 4b9b9f8..2326053 100644
65299--- a/kernel/rcutree_plugin.h
65300+++ b/kernel/rcutree_plugin.h
65301@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65302
65303 /* Clean up and exit. */
65304 smp_mb(); /* ensure expedited GP seen before counter increment. */
65305- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65306+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65307 unlock_mb_ret:
65308 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65309 mb_ret:
65310@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65311
65312 #else /* #ifndef CONFIG_SMP */
65313
65314-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65315-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65316+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65317+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65318
65319 static int synchronize_sched_expedited_cpu_stop(void *data)
65320 {
65321@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65322 int firstsnap, s, snap, trycount = 0;
65323
65324 /* Note that atomic_inc_return() implies full memory barrier. */
65325- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65326+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65327 get_online_cpus();
65328
65329 /*
65330@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65331 }
65332
65333 /* Check to see if someone else did our work for us. */
65334- s = atomic_read(&sync_sched_expedited_done);
65335+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65336 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65337 smp_mb(); /* ensure test happens before caller kfree */
65338 return;
65339@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65340 * grace period works for us.
65341 */
65342 get_online_cpus();
65343- snap = atomic_read(&sync_sched_expedited_started) - 1;
65344+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65345 smp_mb(); /* ensure read is before try_stop_cpus(). */
65346 }
65347
65348@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65349 * than we did beat us to the punch.
65350 */
65351 do {
65352- s = atomic_read(&sync_sched_expedited_done);
65353+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65354 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65355 smp_mb(); /* ensure test happens before caller kfree */
65356 break;
65357 }
65358- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65359+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65360
65361 put_online_cpus();
65362 }
65363@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65364 for_each_online_cpu(thatcpu) {
65365 if (thatcpu == cpu)
65366 continue;
65367- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65368+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65369 thatcpu).dynticks);
65370 smp_mb(); /* Order sampling of snap with end of grace period. */
65371 if ((snap & 0x1) != 0) {
65372diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65373index 9feffa4..54058df 100644
65374--- a/kernel/rcutree_trace.c
65375+++ b/kernel/rcutree_trace.c
65376@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65377 rdp->qs_pending);
65378 #ifdef CONFIG_NO_HZ
65379 seq_printf(m, " dt=%d/%d/%d df=%lu",
65380- atomic_read(&rdp->dynticks->dynticks),
65381+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65382 rdp->dynticks->dynticks_nesting,
65383 rdp->dynticks->dynticks_nmi_nesting,
65384 rdp->dynticks_fqs);
65385@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65386 rdp->qs_pending);
65387 #ifdef CONFIG_NO_HZ
65388 seq_printf(m, ",%d,%d,%d,%lu",
65389- atomic_read(&rdp->dynticks->dynticks),
65390+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65391 rdp->dynticks->dynticks_nesting,
65392 rdp->dynticks->dynticks_nmi_nesting,
65393 rdp->dynticks_fqs);
65394diff --git a/kernel/resource.c b/kernel/resource.c
65395index 7640b3a..5879283 100644
65396--- a/kernel/resource.c
65397+++ b/kernel/resource.c
65398@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65399
65400 static int __init ioresources_init(void)
65401 {
65402+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65403+#ifdef CONFIG_GRKERNSEC_PROC_USER
65404+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65405+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65406+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65407+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65408+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65409+#endif
65410+#else
65411 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65412 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65413+#endif
65414 return 0;
65415 }
65416 __initcall(ioresources_init);
65417diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65418index 3d9f31c..7fefc9e 100644
65419--- a/kernel/rtmutex-tester.c
65420+++ b/kernel/rtmutex-tester.c
65421@@ -20,7 +20,7 @@
65422 #define MAX_RT_TEST_MUTEXES 8
65423
65424 static spinlock_t rttest_lock;
65425-static atomic_t rttest_event;
65426+static atomic_unchecked_t rttest_event;
65427
65428 struct test_thread_data {
65429 int opcode;
65430@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65431
65432 case RTTEST_LOCKCONT:
65433 td->mutexes[td->opdata] = 1;
65434- td->event = atomic_add_return(1, &rttest_event);
65435+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65436 return 0;
65437
65438 case RTTEST_RESET:
65439@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65440 return 0;
65441
65442 case RTTEST_RESETEVENT:
65443- atomic_set(&rttest_event, 0);
65444+ atomic_set_unchecked(&rttest_event, 0);
65445 return 0;
65446
65447 default:
65448@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65449 return ret;
65450
65451 td->mutexes[id] = 1;
65452- td->event = atomic_add_return(1, &rttest_event);
65453+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65454 rt_mutex_lock(&mutexes[id]);
65455- td->event = atomic_add_return(1, &rttest_event);
65456+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65457 td->mutexes[id] = 4;
65458 return 0;
65459
65460@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65461 return ret;
65462
65463 td->mutexes[id] = 1;
65464- td->event = atomic_add_return(1, &rttest_event);
65465+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65466 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65467- td->event = atomic_add_return(1, &rttest_event);
65468+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65469 td->mutexes[id] = ret ? 0 : 4;
65470 return ret ? -EINTR : 0;
65471
65472@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65473 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65474 return ret;
65475
65476- td->event = atomic_add_return(1, &rttest_event);
65477+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65478 rt_mutex_unlock(&mutexes[id]);
65479- td->event = atomic_add_return(1, &rttest_event);
65480+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65481 td->mutexes[id] = 0;
65482 return 0;
65483
65484@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65485 break;
65486
65487 td->mutexes[dat] = 2;
65488- td->event = atomic_add_return(1, &rttest_event);
65489+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65490 break;
65491
65492 default:
65493@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65494 return;
65495
65496 td->mutexes[dat] = 3;
65497- td->event = atomic_add_return(1, &rttest_event);
65498+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65499 break;
65500
65501 case RTTEST_LOCKNOWAIT:
65502@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65503 return;
65504
65505 td->mutexes[dat] = 1;
65506- td->event = atomic_add_return(1, &rttest_event);
65507+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65508 return;
65509
65510 default:
65511diff --git a/kernel/sched.c b/kernel/sched.c
65512index d6b149c..896cbb8 100644
65513--- a/kernel/sched.c
65514+++ b/kernel/sched.c
65515@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65516 BUG(); /* the idle class will always have a runnable task */
65517 }
65518
65519+#ifdef CONFIG_GRKERNSEC_SETXID
65520+extern void gr_delayed_cred_worker(void);
65521+static inline void gr_cred_schedule(void)
65522+{
65523+ if (unlikely(current->delayed_cred))
65524+ gr_delayed_cred_worker();
65525+}
65526+#else
65527+static inline void gr_cred_schedule(void)
65528+{
65529+}
65530+#endif
65531+
65532 /*
65533 * __schedule() is the main scheduler function.
65534 */
65535@@ -4408,6 +4421,8 @@ need_resched:
65536
65537 schedule_debug(prev);
65538
65539+ gr_cred_schedule();
65540+
65541 if (sched_feat(HRTICK))
65542 hrtick_clear(rq);
65543
65544@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
65545 /* convert nice value [19,-20] to rlimit style value [1,40] */
65546 int nice_rlim = 20 - nice;
65547
65548+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65549+
65550 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65551 capable(CAP_SYS_NICE));
65552 }
65553@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65554 if (nice > 19)
65555 nice = 19;
65556
65557- if (increment < 0 && !can_nice(current, nice))
65558+ if (increment < 0 && (!can_nice(current, nice) ||
65559+ gr_handle_chroot_nice()))
65560 return -EPERM;
65561
65562 retval = security_task_setnice(current, nice);
65563@@ -5288,6 +5306,7 @@ recheck:
65564 unsigned long rlim_rtprio =
65565 task_rlimit(p, RLIMIT_RTPRIO);
65566
65567+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65568 /* can't set/change the rt policy */
65569 if (policy != p->policy && !rlim_rtprio)
65570 return -EPERM;
65571diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
65572index 429242f..d7cca82 100644
65573--- a/kernel/sched_autogroup.c
65574+++ b/kernel/sched_autogroup.c
65575@@ -7,7 +7,7 @@
65576
65577 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65578 static struct autogroup autogroup_default;
65579-static atomic_t autogroup_seq_nr;
65580+static atomic_unchecked_t autogroup_seq_nr;
65581
65582 static void __init autogroup_init(struct task_struct *init_task)
65583 {
65584@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
65585
65586 kref_init(&ag->kref);
65587 init_rwsem(&ag->lock);
65588- ag->id = atomic_inc_return(&autogroup_seq_nr);
65589+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65590 ag->tg = tg;
65591 #ifdef CONFIG_RT_GROUP_SCHED
65592 /*
65593diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
65594index 8a39fa3..34f3dbc 100644
65595--- a/kernel/sched_fair.c
65596+++ b/kernel/sched_fair.c
65597@@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
65598 * run_rebalance_domains is triggered when needed from the scheduler tick.
65599 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65600 */
65601-static void run_rebalance_domains(struct softirq_action *h)
65602+static void run_rebalance_domains(void)
65603 {
65604 int this_cpu = smp_processor_id();
65605 struct rq *this_rq = cpu_rq(this_cpu);
65606diff --git a/kernel/signal.c b/kernel/signal.c
65607index 2065515..aed2987 100644
65608--- a/kernel/signal.c
65609+++ b/kernel/signal.c
65610@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
65611
65612 int print_fatal_signals __read_mostly;
65613
65614-static void __user *sig_handler(struct task_struct *t, int sig)
65615+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65616 {
65617 return t->sighand->action[sig - 1].sa.sa_handler;
65618 }
65619
65620-static int sig_handler_ignored(void __user *handler, int sig)
65621+static int sig_handler_ignored(__sighandler_t handler, int sig)
65622 {
65623 /* Is it explicitly or implicitly ignored? */
65624 return handler == SIG_IGN ||
65625@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
65626 static int sig_task_ignored(struct task_struct *t, int sig,
65627 int from_ancestor_ns)
65628 {
65629- void __user *handler;
65630+ __sighandler_t handler;
65631
65632 handler = sig_handler(t, sig);
65633
65634@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
65635 atomic_inc(&user->sigpending);
65636 rcu_read_unlock();
65637
65638+ if (!override_rlimit)
65639+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65640+
65641 if (override_rlimit ||
65642 atomic_read(&user->sigpending) <=
65643 task_rlimit(t, RLIMIT_SIGPENDING)) {
65644@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
65645
65646 int unhandled_signal(struct task_struct *tsk, int sig)
65647 {
65648- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65649+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65650 if (is_global_init(tsk))
65651 return 1;
65652 if (handler != SIG_IGN && handler != SIG_DFL)
65653@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
65654 }
65655 }
65656
65657+ /* allow glibc communication via tgkill to other threads in our
65658+ thread group */
65659+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65660+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65661+ && gr_handle_signal(t, sig))
65662+ return -EPERM;
65663+
65664 return security_task_kill(t, info, sig, 0);
65665 }
65666
65667@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
65668 return send_signal(sig, info, p, 1);
65669 }
65670
65671-static int
65672+int
65673 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65674 {
65675 return send_signal(sig, info, t, 0);
65676@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65677 unsigned long int flags;
65678 int ret, blocked, ignored;
65679 struct k_sigaction *action;
65680+ int is_unhandled = 0;
65681
65682 spin_lock_irqsave(&t->sighand->siglock, flags);
65683 action = &t->sighand->action[sig-1];
65684@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65685 }
65686 if (action->sa.sa_handler == SIG_DFL)
65687 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65688+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65689+ is_unhandled = 1;
65690 ret = specific_send_sig_info(sig, info, t);
65691 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65692
65693+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65694+ normal operation */
65695+ if (is_unhandled) {
65696+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65697+ gr_handle_crash(t, sig);
65698+ }
65699+
65700 return ret;
65701 }
65702
65703@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
65704 ret = check_kill_permission(sig, info, p);
65705 rcu_read_unlock();
65706
65707- if (!ret && sig)
65708+ if (!ret && sig) {
65709 ret = do_send_sig_info(sig, info, p, true);
65710+ if (!ret)
65711+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65712+ }
65713
65714 return ret;
65715 }
65716@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
65717 int error = -ESRCH;
65718
65719 rcu_read_lock();
65720- p = find_task_by_vpid(pid);
65721+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65722+ /* allow glibc communication via tgkill to other threads in our
65723+ thread group */
65724+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65725+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65726+ p = find_task_by_vpid_unrestricted(pid);
65727+ else
65728+#endif
65729+ p = find_task_by_vpid(pid);
65730 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65731 error = check_kill_permission(sig, info, p);
65732 /*
65733diff --git a/kernel/smp.c b/kernel/smp.c
65734index db197d6..17aef0b 100644
65735--- a/kernel/smp.c
65736+++ b/kernel/smp.c
65737@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
65738 }
65739 EXPORT_SYMBOL(smp_call_function);
65740
65741-void ipi_call_lock(void)
65742+void ipi_call_lock(void) __acquires(call_function.lock)
65743 {
65744 raw_spin_lock(&call_function.lock);
65745 }
65746
65747-void ipi_call_unlock(void)
65748+void ipi_call_unlock(void) __releases(call_function.lock)
65749 {
65750 raw_spin_unlock(&call_function.lock);
65751 }
65752
65753-void ipi_call_lock_irq(void)
65754+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65755 {
65756 raw_spin_lock_irq(&call_function.lock);
65757 }
65758
65759-void ipi_call_unlock_irq(void)
65760+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65761 {
65762 raw_spin_unlock_irq(&call_function.lock);
65763 }
65764diff --git a/kernel/softirq.c b/kernel/softirq.c
65765index 2c71d91..1021f81 100644
65766--- a/kernel/softirq.c
65767+++ b/kernel/softirq.c
65768@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
65769
65770 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65771
65772-char *softirq_to_name[NR_SOFTIRQS] = {
65773+const char * const softirq_to_name[NR_SOFTIRQS] = {
65774 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65775 "TASKLET", "SCHED", "HRTIMER", "RCU"
65776 };
65777@@ -235,7 +235,7 @@ restart:
65778 kstat_incr_softirqs_this_cpu(vec_nr);
65779
65780 trace_softirq_entry(vec_nr);
65781- h->action(h);
65782+ h->action();
65783 trace_softirq_exit(vec_nr);
65784 if (unlikely(prev_count != preempt_count())) {
65785 printk(KERN_ERR "huh, entered softirq %u %s %p"
65786@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65787 local_irq_restore(flags);
65788 }
65789
65790-void open_softirq(int nr, void (*action)(struct softirq_action *))
65791+void open_softirq(int nr, void (*action)(void))
65792 {
65793- softirq_vec[nr].action = action;
65794+ pax_open_kernel();
65795+ *(void **)&softirq_vec[nr].action = action;
65796+ pax_close_kernel();
65797 }
65798
65799 /*
65800@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
65801
65802 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65803
65804-static void tasklet_action(struct softirq_action *a)
65805+static void tasklet_action(void)
65806 {
65807 struct tasklet_struct *list;
65808
65809@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
65810 }
65811 }
65812
65813-static void tasklet_hi_action(struct softirq_action *a)
65814+static void tasklet_hi_action(void)
65815 {
65816 struct tasklet_struct *list;
65817
65818diff --git a/kernel/sys.c b/kernel/sys.c
65819index 481611f..0754d86 100644
65820--- a/kernel/sys.c
65821+++ b/kernel/sys.c
65822@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
65823 error = -EACCES;
65824 goto out;
65825 }
65826+
65827+ if (gr_handle_chroot_setpriority(p, niceval)) {
65828+ error = -EACCES;
65829+ goto out;
65830+ }
65831+
65832 no_nice = security_task_setnice(p, niceval);
65833 if (no_nice) {
65834 error = no_nice;
65835@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
65836 goto error;
65837 }
65838
65839+ if (gr_check_group_change(new->gid, new->egid, -1))
65840+ goto error;
65841+
65842 if (rgid != (gid_t) -1 ||
65843 (egid != (gid_t) -1 && egid != old->gid))
65844 new->sgid = new->egid;
65845@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65846 old = current_cred();
65847
65848 retval = -EPERM;
65849+
65850+ if (gr_check_group_change(gid, gid, gid))
65851+ goto error;
65852+
65853 if (nsown_capable(CAP_SETGID))
65854 new->gid = new->egid = new->sgid = new->fsgid = gid;
65855 else if (gid == old->gid || gid == old->sgid)
65856@@ -618,7 +631,7 @@ error:
65857 /*
65858 * change the user struct in a credentials set to match the new UID
65859 */
65860-static int set_user(struct cred *new)
65861+int set_user(struct cred *new)
65862 {
65863 struct user_struct *new_user;
65864
65865@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
65866 goto error;
65867 }
65868
65869+ if (gr_check_user_change(new->uid, new->euid, -1))
65870+ goto error;
65871+
65872 if (new->uid != old->uid) {
65873 retval = set_user(new);
65874 if (retval < 0)
65875@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65876 old = current_cred();
65877
65878 retval = -EPERM;
65879+
65880+ if (gr_check_crash_uid(uid))
65881+ goto error;
65882+ if (gr_check_user_change(uid, uid, uid))
65883+ goto error;
65884+
65885 if (nsown_capable(CAP_SETUID)) {
65886 new->suid = new->uid = uid;
65887 if (uid != old->uid) {
65888@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
65889 goto error;
65890 }
65891
65892+ if (gr_check_user_change(ruid, euid, -1))
65893+ goto error;
65894+
65895 if (ruid != (uid_t) -1) {
65896 new->uid = ruid;
65897 if (ruid != old->uid) {
65898@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
65899 goto error;
65900 }
65901
65902+ if (gr_check_group_change(rgid, egid, -1))
65903+ goto error;
65904+
65905 if (rgid != (gid_t) -1)
65906 new->gid = rgid;
65907 if (egid != (gid_t) -1)
65908@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65909 old = current_cred();
65910 old_fsuid = old->fsuid;
65911
65912+ if (gr_check_user_change(-1, -1, uid))
65913+ goto error;
65914+
65915 if (uid == old->uid || uid == old->euid ||
65916 uid == old->suid || uid == old->fsuid ||
65917 nsown_capable(CAP_SETUID)) {
65918@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65919 }
65920 }
65921
65922+error:
65923 abort_creds(new);
65924 return old_fsuid;
65925
65926@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65927 if (gid == old->gid || gid == old->egid ||
65928 gid == old->sgid || gid == old->fsgid ||
65929 nsown_capable(CAP_SETGID)) {
65930+ if (gr_check_group_change(-1, -1, gid))
65931+ goto error;
65932+
65933 if (gid != old_fsgid) {
65934 new->fsgid = gid;
65935 goto change_okay;
65936 }
65937 }
65938
65939+error:
65940 abort_creds(new);
65941 return old_fsgid;
65942
65943@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
65944 }
65945 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
65946 snprintf(buf, len, "2.6.%u%s", v, rest);
65947- ret = copy_to_user(release, buf, len);
65948+ if (len > sizeof(buf))
65949+ ret = -EFAULT;
65950+ else
65951+ ret = copy_to_user(release, buf, len);
65952 }
65953 return ret;
65954 }
65955@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
65956 return -EFAULT;
65957
65958 down_read(&uts_sem);
65959- error = __copy_to_user(&name->sysname, &utsname()->sysname,
65960+ error = __copy_to_user(name->sysname, &utsname()->sysname,
65961 __OLD_UTS_LEN);
65962 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65963- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65964+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
65965 __OLD_UTS_LEN);
65966 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65967- error |= __copy_to_user(&name->release, &utsname()->release,
65968+ error |= __copy_to_user(name->release, &utsname()->release,
65969 __OLD_UTS_LEN);
65970 error |= __put_user(0, name->release + __OLD_UTS_LEN);
65971- error |= __copy_to_user(&name->version, &utsname()->version,
65972+ error |= __copy_to_user(name->version, &utsname()->version,
65973 __OLD_UTS_LEN);
65974 error |= __put_user(0, name->version + __OLD_UTS_LEN);
65975- error |= __copy_to_user(&name->machine, &utsname()->machine,
65976+ error |= __copy_to_user(name->machine, &utsname()->machine,
65977 __OLD_UTS_LEN);
65978 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
65979 up_read(&uts_sem);
65980@@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
65981 error = get_dumpable(me->mm);
65982 break;
65983 case PR_SET_DUMPABLE:
65984- if (arg2 < 0 || arg2 > 1) {
65985+ if (arg2 > 1) {
65986 error = -EINVAL;
65987 break;
65988 }
65989diff --git a/kernel/sysctl.c b/kernel/sysctl.c
65990index ae27196..7506d69 100644
65991--- a/kernel/sysctl.c
65992+++ b/kernel/sysctl.c
65993@@ -86,6 +86,13 @@
65994
65995
65996 #if defined(CONFIG_SYSCTL)
65997+#include <linux/grsecurity.h>
65998+#include <linux/grinternal.h>
65999+
66000+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66001+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66002+ const int op);
66003+extern int gr_handle_chroot_sysctl(const int op);
66004
66005 /* External variables not in a header file. */
66006 extern int sysctl_overcommit_memory;
66007@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66008 }
66009
66010 #endif
66011+extern struct ctl_table grsecurity_table[];
66012
66013 static struct ctl_table root_table[];
66014 static struct ctl_table_root sysctl_table_root;
66015@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66016 int sysctl_legacy_va_layout;
66017 #endif
66018
66019+#ifdef CONFIG_PAX_SOFTMODE
66020+static ctl_table pax_table[] = {
66021+ {
66022+ .procname = "softmode",
66023+ .data = &pax_softmode,
66024+ .maxlen = sizeof(unsigned int),
66025+ .mode = 0600,
66026+ .proc_handler = &proc_dointvec,
66027+ },
66028+
66029+ { }
66030+};
66031+#endif
66032+
66033 /* The default sysctl tables: */
66034
66035 static struct ctl_table root_table[] = {
66036@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66037 #endif
66038
66039 static struct ctl_table kern_table[] = {
66040+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66041+ {
66042+ .procname = "grsecurity",
66043+ .mode = 0500,
66044+ .child = grsecurity_table,
66045+ },
66046+#endif
66047+
66048+#ifdef CONFIG_PAX_SOFTMODE
66049+ {
66050+ .procname = "pax",
66051+ .mode = 0500,
66052+ .child = pax_table,
66053+ },
66054+#endif
66055+
66056 {
66057 .procname = "sched_child_runs_first",
66058 .data = &sysctl_sched_child_runs_first,
66059@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66060 .data = &modprobe_path,
66061 .maxlen = KMOD_PATH_LEN,
66062 .mode = 0644,
66063- .proc_handler = proc_dostring,
66064+ .proc_handler = proc_dostring_modpriv,
66065 },
66066 {
66067 .procname = "modules_disabled",
66068@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66069 .extra1 = &zero,
66070 .extra2 = &one,
66071 },
66072+#endif
66073 {
66074 .procname = "kptr_restrict",
66075 .data = &kptr_restrict,
66076 .maxlen = sizeof(int),
66077 .mode = 0644,
66078 .proc_handler = proc_dmesg_restrict,
66079+#ifdef CONFIG_GRKERNSEC_HIDESYM
66080+ .extra1 = &two,
66081+#else
66082 .extra1 = &zero,
66083+#endif
66084 .extra2 = &two,
66085 },
66086-#endif
66087 {
66088 .procname = "ngroups_max",
66089 .data = &ngroups_max,
66090@@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66091 .proc_handler = proc_dointvec_minmax,
66092 .extra1 = &zero,
66093 },
66094+ {
66095+ .procname = "heap_stack_gap",
66096+ .data = &sysctl_heap_stack_gap,
66097+ .maxlen = sizeof(sysctl_heap_stack_gap),
66098+ .mode = 0644,
66099+ .proc_handler = proc_doulongvec_minmax,
66100+ },
66101 #else
66102 {
66103 .procname = "nr_trim_pages",
66104@@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66105 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66106 {
66107 int mode;
66108+ int error;
66109+
66110+ if (table->parent != NULL && table->parent->procname != NULL &&
66111+ table->procname != NULL &&
66112+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66113+ return -EACCES;
66114+ if (gr_handle_chroot_sysctl(op))
66115+ return -EACCES;
66116+ error = gr_handle_sysctl(table, op);
66117+ if (error)
66118+ return error;
66119
66120 if (root->permissions)
66121 mode = root->permissions(root, current->nsproxy, table);
66122@@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66123 buffer, lenp, ppos);
66124 }
66125
66126+int proc_dostring_modpriv(struct ctl_table *table, int write,
66127+ void __user *buffer, size_t *lenp, loff_t *ppos)
66128+{
66129+ if (write && !capable(CAP_SYS_MODULE))
66130+ return -EPERM;
66131+
66132+ return _proc_do_string(table->data, table->maxlen, write,
66133+ buffer, lenp, ppos);
66134+}
66135+
66136 static size_t proc_skip_spaces(char **buf)
66137 {
66138 size_t ret;
66139@@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66140 len = strlen(tmp);
66141 if (len > *size)
66142 len = *size;
66143+ if (len > sizeof(tmp))
66144+ len = sizeof(tmp);
66145 if (copy_to_user(*buf, tmp, len))
66146 return -EFAULT;
66147 *size -= len;
66148@@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66149 *i = val;
66150 } else {
66151 val = convdiv * (*i) / convmul;
66152- if (!first)
66153+ if (!first) {
66154 err = proc_put_char(&buffer, &left, '\t');
66155+ if (err)
66156+ break;
66157+ }
66158 err = proc_put_long(&buffer, &left, val, false);
66159 if (err)
66160 break;
66161@@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66162 return -ENOSYS;
66163 }
66164
66165+int proc_dostring_modpriv(struct ctl_table *table, int write,
66166+ void __user *buffer, size_t *lenp, loff_t *ppos)
66167+{
66168+ return -ENOSYS;
66169+}
66170+
66171 int proc_dointvec(struct ctl_table *table, int write,
66172 void __user *buffer, size_t *lenp, loff_t *ppos)
66173 {
66174@@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66175 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66176 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66177 EXPORT_SYMBOL(proc_dostring);
66178+EXPORT_SYMBOL(proc_dostring_modpriv);
66179 EXPORT_SYMBOL(proc_doulongvec_minmax);
66180 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66181 EXPORT_SYMBOL(register_sysctl_table);
66182diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66183index a650694..aaeeb20 100644
66184--- a/kernel/sysctl_binary.c
66185+++ b/kernel/sysctl_binary.c
66186@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66187 int i;
66188
66189 set_fs(KERNEL_DS);
66190- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66191+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66192 set_fs(old_fs);
66193 if (result < 0)
66194 goto out_kfree;
66195@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66196 }
66197
66198 set_fs(KERNEL_DS);
66199- result = vfs_write(file, buffer, str - buffer, &pos);
66200+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66201 set_fs(old_fs);
66202 if (result < 0)
66203 goto out_kfree;
66204@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66205 int i;
66206
66207 set_fs(KERNEL_DS);
66208- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66209+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66210 set_fs(old_fs);
66211 if (result < 0)
66212 goto out_kfree;
66213@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66214 }
66215
66216 set_fs(KERNEL_DS);
66217- result = vfs_write(file, buffer, str - buffer, &pos);
66218+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66219 set_fs(old_fs);
66220 if (result < 0)
66221 goto out_kfree;
66222@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66223 int i;
66224
66225 set_fs(KERNEL_DS);
66226- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66227+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66228 set_fs(old_fs);
66229 if (result < 0)
66230 goto out;
66231@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66232 __le16 dnaddr;
66233
66234 set_fs(KERNEL_DS);
66235- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66236+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66237 set_fs(old_fs);
66238 if (result < 0)
66239 goto out;
66240@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66241 le16_to_cpu(dnaddr) & 0x3ff);
66242
66243 set_fs(KERNEL_DS);
66244- result = vfs_write(file, buf, len, &pos);
66245+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66246 set_fs(old_fs);
66247 if (result < 0)
66248 goto out;
66249diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66250index 362da65..ab8ef8c 100644
66251--- a/kernel/sysctl_check.c
66252+++ b/kernel/sysctl_check.c
66253@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66254 set_fail(&fail, table, "Directory with extra2");
66255 } else {
66256 if ((table->proc_handler == proc_dostring) ||
66257+ (table->proc_handler == proc_dostring_modpriv) ||
66258 (table->proc_handler == proc_dointvec) ||
66259 (table->proc_handler == proc_dointvec_minmax) ||
66260 (table->proc_handler == proc_dointvec_jiffies) ||
66261diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66262index e660464..c8b9e67 100644
66263--- a/kernel/taskstats.c
66264+++ b/kernel/taskstats.c
66265@@ -27,9 +27,12 @@
66266 #include <linux/cgroup.h>
66267 #include <linux/fs.h>
66268 #include <linux/file.h>
66269+#include <linux/grsecurity.h>
66270 #include <net/genetlink.h>
66271 #include <linux/atomic.h>
66272
66273+extern int gr_is_taskstats_denied(int pid);
66274+
66275 /*
66276 * Maximum length of a cpumask that can be specified in
66277 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66278@@ -556,6 +559,9 @@ err:
66279
66280 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66281 {
66282+ if (gr_is_taskstats_denied(current->pid))
66283+ return -EACCES;
66284+
66285 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66286 return cmd_attr_register_cpumask(info);
66287 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66288diff --git a/kernel/time.c b/kernel/time.c
66289index 73e416d..cfc6f69 100644
66290--- a/kernel/time.c
66291+++ b/kernel/time.c
66292@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66293 return error;
66294
66295 if (tz) {
66296+ /* we log in do_settimeofday called below, so don't log twice
66297+ */
66298+ if (!tv)
66299+ gr_log_timechange();
66300+
66301 /* SMP safe, global irq locking makes it work. */
66302 sys_tz = *tz;
66303 update_vsyscall_tz();
66304diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66305index 8a46f5d..bbe6f9c 100644
66306--- a/kernel/time/alarmtimer.c
66307+++ b/kernel/time/alarmtimer.c
66308@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66309 struct platform_device *pdev;
66310 int error = 0;
66311 int i;
66312- struct k_clock alarm_clock = {
66313+ static struct k_clock alarm_clock = {
66314 .clock_getres = alarm_clock_getres,
66315 .clock_get = alarm_clock_get,
66316 .timer_create = alarm_timer_create,
66317diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66318index fd4a7b1..fae5c2a 100644
66319--- a/kernel/time/tick-broadcast.c
66320+++ b/kernel/time/tick-broadcast.c
66321@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66322 * then clear the broadcast bit.
66323 */
66324 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66325- int cpu = smp_processor_id();
66326+ cpu = smp_processor_id();
66327
66328 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66329 tick_broadcast_clear_oneshot(cpu);
66330diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66331index 2378413..be455fd 100644
66332--- a/kernel/time/timekeeping.c
66333+++ b/kernel/time/timekeeping.c
66334@@ -14,6 +14,7 @@
66335 #include <linux/init.h>
66336 #include <linux/mm.h>
66337 #include <linux/sched.h>
66338+#include <linux/grsecurity.h>
66339 #include <linux/syscore_ops.h>
66340 #include <linux/clocksource.h>
66341 #include <linux/jiffies.h>
66342@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66343 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66344 return -EINVAL;
66345
66346+ gr_log_timechange();
66347+
66348 write_seqlock_irqsave(&xtime_lock, flags);
66349
66350 timekeeping_forward_now();
66351diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66352index 3258455..f35227d 100644
66353--- a/kernel/time/timer_list.c
66354+++ b/kernel/time/timer_list.c
66355@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66356
66357 static void print_name_offset(struct seq_file *m, void *sym)
66358 {
66359+#ifdef CONFIG_GRKERNSEC_HIDESYM
66360+ SEQ_printf(m, "<%p>", NULL);
66361+#else
66362 char symname[KSYM_NAME_LEN];
66363
66364 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66365 SEQ_printf(m, "<%pK>", sym);
66366 else
66367 SEQ_printf(m, "%s", symname);
66368+#endif
66369 }
66370
66371 static void
66372@@ -112,7 +116,11 @@ next_one:
66373 static void
66374 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66375 {
66376+#ifdef CONFIG_GRKERNSEC_HIDESYM
66377+ SEQ_printf(m, " .base: %p\n", NULL);
66378+#else
66379 SEQ_printf(m, " .base: %pK\n", base);
66380+#endif
66381 SEQ_printf(m, " .index: %d\n",
66382 base->index);
66383 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66384@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66385 {
66386 struct proc_dir_entry *pe;
66387
66388+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66389+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66390+#else
66391 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66392+#endif
66393 if (!pe)
66394 return -ENOMEM;
66395 return 0;
66396diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66397index 0b537f2..9e71eca 100644
66398--- a/kernel/time/timer_stats.c
66399+++ b/kernel/time/timer_stats.c
66400@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66401 static unsigned long nr_entries;
66402 static struct entry entries[MAX_ENTRIES];
66403
66404-static atomic_t overflow_count;
66405+static atomic_unchecked_t overflow_count;
66406
66407 /*
66408 * The entries are in a hash-table, for fast lookup:
66409@@ -140,7 +140,7 @@ static void reset_entries(void)
66410 nr_entries = 0;
66411 memset(entries, 0, sizeof(entries));
66412 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66413- atomic_set(&overflow_count, 0);
66414+ atomic_set_unchecked(&overflow_count, 0);
66415 }
66416
66417 static struct entry *alloc_entry(void)
66418@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66419 if (likely(entry))
66420 entry->count++;
66421 else
66422- atomic_inc(&overflow_count);
66423+ atomic_inc_unchecked(&overflow_count);
66424
66425 out_unlock:
66426 raw_spin_unlock_irqrestore(lock, flags);
66427@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66428
66429 static void print_name_offset(struct seq_file *m, unsigned long addr)
66430 {
66431+#ifdef CONFIG_GRKERNSEC_HIDESYM
66432+ seq_printf(m, "<%p>", NULL);
66433+#else
66434 char symname[KSYM_NAME_LEN];
66435
66436 if (lookup_symbol_name(addr, symname) < 0)
66437 seq_printf(m, "<%p>", (void *)addr);
66438 else
66439 seq_printf(m, "%s", symname);
66440+#endif
66441 }
66442
66443 static int tstats_show(struct seq_file *m, void *v)
66444@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66445
66446 seq_puts(m, "Timer Stats Version: v0.2\n");
66447 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66448- if (atomic_read(&overflow_count))
66449+ if (atomic_read_unchecked(&overflow_count))
66450 seq_printf(m, "Overflow: %d entries\n",
66451- atomic_read(&overflow_count));
66452+ atomic_read_unchecked(&overflow_count));
66453
66454 for (i = 0; i < nr_entries; i++) {
66455 entry = entries + i;
66456@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66457 {
66458 struct proc_dir_entry *pe;
66459
66460+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66461+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66462+#else
66463 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66464+#endif
66465 if (!pe)
66466 return -ENOMEM;
66467 return 0;
66468diff --git a/kernel/timer.c b/kernel/timer.c
66469index 9c3c62b..441690e 100644
66470--- a/kernel/timer.c
66471+++ b/kernel/timer.c
66472@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66473 /*
66474 * This function runs timers and the timer-tq in bottom half context.
66475 */
66476-static void run_timer_softirq(struct softirq_action *h)
66477+static void run_timer_softirq(void)
66478 {
66479 struct tvec_base *base = __this_cpu_read(tvec_bases);
66480
66481diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66482index 16fc34a..efd8bb8 100644
66483--- a/kernel/trace/blktrace.c
66484+++ b/kernel/trace/blktrace.c
66485@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66486 struct blk_trace *bt = filp->private_data;
66487 char buf[16];
66488
66489- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66490+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66491
66492 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66493 }
66494@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66495 return 1;
66496
66497 bt = buf->chan->private_data;
66498- atomic_inc(&bt->dropped);
66499+ atomic_inc_unchecked(&bt->dropped);
66500 return 0;
66501 }
66502
66503@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66504
66505 bt->dir = dir;
66506 bt->dev = dev;
66507- atomic_set(&bt->dropped, 0);
66508+ atomic_set_unchecked(&bt->dropped, 0);
66509
66510 ret = -EIO;
66511 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66512diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66513index 25b4f4d..6f4772d 100644
66514--- a/kernel/trace/ftrace.c
66515+++ b/kernel/trace/ftrace.c
66516@@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
66517 if (unlikely(ftrace_disabled))
66518 return 0;
66519
66520+ ret = ftrace_arch_code_modify_prepare();
66521+ FTRACE_WARN_ON(ret);
66522+ if (ret)
66523+ return 0;
66524+
66525 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66526+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66527 if (ret) {
66528 ftrace_bug(ret, ip);
66529- return 0;
66530 }
66531- return 1;
66532+ return ret ? 0 : 1;
66533 }
66534
66535 /*
66536@@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66537
66538 int
66539 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66540- void *data)
66541+ void *data)
66542 {
66543 struct ftrace_func_probe *entry;
66544 struct ftrace_page *pg;
66545diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
66546index f2bd275..adaf3a2 100644
66547--- a/kernel/trace/trace.c
66548+++ b/kernel/trace/trace.c
66549@@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
66550 };
66551 #endif
66552
66553-static struct dentry *d_tracer;
66554-
66555 struct dentry *tracing_init_dentry(void)
66556 {
66557+ static struct dentry *d_tracer;
66558 static int once;
66559
66560 if (d_tracer)
66561@@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
66562 return d_tracer;
66563 }
66564
66565-static struct dentry *d_percpu;
66566-
66567 struct dentry *tracing_dentry_percpu(void)
66568 {
66569+ static struct dentry *d_percpu;
66570 static int once;
66571 struct dentry *d_tracer;
66572
66573diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
66574index c212a7f..7b02394 100644
66575--- a/kernel/trace/trace_events.c
66576+++ b/kernel/trace/trace_events.c
66577@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
66578 struct ftrace_module_file_ops {
66579 struct list_head list;
66580 struct module *mod;
66581- struct file_operations id;
66582- struct file_operations enable;
66583- struct file_operations format;
66584- struct file_operations filter;
66585 };
66586
66587 static struct ftrace_module_file_ops *
66588@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
66589
66590 file_ops->mod = mod;
66591
66592- file_ops->id = ftrace_event_id_fops;
66593- file_ops->id.owner = mod;
66594-
66595- file_ops->enable = ftrace_enable_fops;
66596- file_ops->enable.owner = mod;
66597-
66598- file_ops->filter = ftrace_event_filter_fops;
66599- file_ops->filter.owner = mod;
66600-
66601- file_ops->format = ftrace_event_format_fops;
66602- file_ops->format.owner = mod;
66603+ pax_open_kernel();
66604+ *(void **)&mod->trace_id.owner = mod;
66605+ *(void **)&mod->trace_enable.owner = mod;
66606+ *(void **)&mod->trace_filter.owner = mod;
66607+ *(void **)&mod->trace_format.owner = mod;
66608+ pax_close_kernel();
66609
66610 list_add(&file_ops->list, &ftrace_module_file_list);
66611
66612@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
66613
66614 for_each_event(call, start, end) {
66615 __trace_add_event_call(*call, mod,
66616- &file_ops->id, &file_ops->enable,
66617- &file_ops->filter, &file_ops->format);
66618+ &mod->trace_id, &mod->trace_enable,
66619+ &mod->trace_filter, &mod->trace_format);
66620 }
66621 }
66622
66623diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
66624index 00d527c..7c5b1a3 100644
66625--- a/kernel/trace/trace_kprobe.c
66626+++ b/kernel/trace/trace_kprobe.c
66627@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
66628 long ret;
66629 int maxlen = get_rloc_len(*(u32 *)dest);
66630 u8 *dst = get_rloc_data(dest);
66631- u8 *src = addr;
66632+ const u8 __user *src = (const u8 __force_user *)addr;
66633 mm_segment_t old_fs = get_fs();
66634 if (!maxlen)
66635 return;
66636@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
66637 pagefault_disable();
66638 do
66639 ret = __copy_from_user_inatomic(dst++, src++, 1);
66640- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66641+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66642 dst[-1] = '\0';
66643 pagefault_enable();
66644 set_fs(old_fs);
66645@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
66646 ((u8 *)get_rloc_data(dest))[0] = '\0';
66647 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66648 } else
66649- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66650+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66651 get_rloc_offs(*(u32 *)dest));
66652 }
66653 /* Return the length of string -- including null terminal byte */
66654@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
66655 set_fs(KERNEL_DS);
66656 pagefault_disable();
66657 do {
66658- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66659+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66660 len++;
66661 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66662 pagefault_enable();
66663diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
66664index fd3c8aa..5f324a6 100644
66665--- a/kernel/trace/trace_mmiotrace.c
66666+++ b/kernel/trace/trace_mmiotrace.c
66667@@ -24,7 +24,7 @@ struct header_iter {
66668 static struct trace_array *mmio_trace_array;
66669 static bool overrun_detected;
66670 static unsigned long prev_overruns;
66671-static atomic_t dropped_count;
66672+static atomic_unchecked_t dropped_count;
66673
66674 static void mmio_reset_data(struct trace_array *tr)
66675 {
66676@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
66677
66678 static unsigned long count_overruns(struct trace_iterator *iter)
66679 {
66680- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66681+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66682 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66683
66684 if (over > prev_overruns)
66685@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
66686 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66687 sizeof(*entry), 0, pc);
66688 if (!event) {
66689- atomic_inc(&dropped_count);
66690+ atomic_inc_unchecked(&dropped_count);
66691 return;
66692 }
66693 entry = ring_buffer_event_data(event);
66694@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
66695 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66696 sizeof(*entry), 0, pc);
66697 if (!event) {
66698- atomic_inc(&dropped_count);
66699+ atomic_inc_unchecked(&dropped_count);
66700 return;
66701 }
66702 entry = ring_buffer_event_data(event);
66703diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
66704index 5199930..26c73a0 100644
66705--- a/kernel/trace/trace_output.c
66706+++ b/kernel/trace/trace_output.c
66707@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
66708
66709 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66710 if (!IS_ERR(p)) {
66711- p = mangle_path(s->buffer + s->len, p, "\n");
66712+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66713 if (p) {
66714 s->len = p - s->buffer;
66715 return 1;
66716diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
66717index 77575b3..6e623d1 100644
66718--- a/kernel/trace/trace_stack.c
66719+++ b/kernel/trace/trace_stack.c
66720@@ -50,7 +50,7 @@ static inline void check_stack(void)
66721 return;
66722
66723 /* we do not handle interrupt stacks yet */
66724- if (!object_is_on_stack(&this_size))
66725+ if (!object_starts_on_stack(&this_size))
66726 return;
66727
66728 local_irq_save(flags);
66729diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
66730index 209b379..7f76423 100644
66731--- a/kernel/trace/trace_workqueue.c
66732+++ b/kernel/trace/trace_workqueue.c
66733@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66734 int cpu;
66735 pid_t pid;
66736 /* Can be inserted from interrupt or user context, need to be atomic */
66737- atomic_t inserted;
66738+ atomic_unchecked_t inserted;
66739 /*
66740 * Don't need to be atomic, works are serialized in a single workqueue thread
66741 * on a single CPU.
66742@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66743 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66744 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66745 if (node->pid == wq_thread->pid) {
66746- atomic_inc(&node->inserted);
66747+ atomic_inc_unchecked(&node->inserted);
66748 goto found;
66749 }
66750 }
66751@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
66752 tsk = get_pid_task(pid, PIDTYPE_PID);
66753 if (tsk) {
66754 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66755- atomic_read(&cws->inserted), cws->executed,
66756+ atomic_read_unchecked(&cws->inserted), cws->executed,
66757 tsk->comm);
66758 put_task_struct(tsk);
66759 }
66760diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
66761index 82928f5..92da771 100644
66762--- a/lib/Kconfig.debug
66763+++ b/lib/Kconfig.debug
66764@@ -1103,6 +1103,7 @@ config LATENCYTOP
66765 depends on DEBUG_KERNEL
66766 depends on STACKTRACE_SUPPORT
66767 depends on PROC_FS
66768+ depends on !GRKERNSEC_HIDESYM
66769 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
66770 select KALLSYMS
66771 select KALLSYMS_ALL
66772diff --git a/lib/bitmap.c b/lib/bitmap.c
66773index 0d4a127..33a06c7 100644
66774--- a/lib/bitmap.c
66775+++ b/lib/bitmap.c
66776@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
66777 {
66778 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66779 u32 chunk;
66780- const char __user __force *ubuf = (const char __user __force *)buf;
66781+ const char __user *ubuf = (const char __force_user *)buf;
66782
66783 bitmap_zero(maskp, nmaskbits);
66784
66785@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
66786 {
66787 if (!access_ok(VERIFY_READ, ubuf, ulen))
66788 return -EFAULT;
66789- return __bitmap_parse((const char __force *)ubuf,
66790+ return __bitmap_parse((const char __force_kernel *)ubuf,
66791 ulen, 1, maskp, nmaskbits);
66792
66793 }
66794@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
66795 {
66796 unsigned a, b;
66797 int c, old_c, totaldigits;
66798- const char __user __force *ubuf = (const char __user __force *)buf;
66799+ const char __user *ubuf = (const char __force_user *)buf;
66800 int exp_digit, in_range;
66801
66802 totaldigits = c = 0;
66803@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
66804 {
66805 if (!access_ok(VERIFY_READ, ubuf, ulen))
66806 return -EFAULT;
66807- return __bitmap_parselist((const char __force *)ubuf,
66808+ return __bitmap_parselist((const char __force_kernel *)ubuf,
66809 ulen, 1, maskp, nmaskbits);
66810 }
66811 EXPORT_SYMBOL(bitmap_parselist_user);
66812diff --git a/lib/bug.c b/lib/bug.c
66813index 1955209..cbbb2ad 100644
66814--- a/lib/bug.c
66815+++ b/lib/bug.c
66816@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
66817 return BUG_TRAP_TYPE_NONE;
66818
66819 bug = find_bug(bugaddr);
66820+ if (!bug)
66821+ return BUG_TRAP_TYPE_NONE;
66822
66823 file = NULL;
66824 line = 0;
66825diff --git a/lib/debugobjects.c b/lib/debugobjects.c
66826index a78b7c6..2c73084 100644
66827--- a/lib/debugobjects.c
66828+++ b/lib/debugobjects.c
66829@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
66830 if (limit > 4)
66831 return;
66832
66833- is_on_stack = object_is_on_stack(addr);
66834+ is_on_stack = object_starts_on_stack(addr);
66835 if (is_on_stack == onstack)
66836 return;
66837
66838diff --git a/lib/devres.c b/lib/devres.c
66839index 7c0e953..f642b5c 100644
66840--- a/lib/devres.c
66841+++ b/lib/devres.c
66842@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
66843 void devm_iounmap(struct device *dev, void __iomem *addr)
66844 {
66845 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66846- (void *)addr));
66847+ (void __force *)addr));
66848 iounmap(addr);
66849 }
66850 EXPORT_SYMBOL(devm_iounmap);
66851@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
66852 {
66853 ioport_unmap(addr);
66854 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66855- devm_ioport_map_match, (void *)addr));
66856+ devm_ioport_map_match, (void __force *)addr));
66857 }
66858 EXPORT_SYMBOL(devm_ioport_unmap);
66859
66860diff --git a/lib/dma-debug.c b/lib/dma-debug.c
66861index fea790a..ebb0e82 100644
66862--- a/lib/dma-debug.c
66863+++ b/lib/dma-debug.c
66864@@ -925,7 +925,7 @@ out:
66865
66866 static void check_for_stack(struct device *dev, void *addr)
66867 {
66868- if (object_is_on_stack(addr))
66869+ if (object_starts_on_stack(addr))
66870 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66871 "stack [addr=%p]\n", addr);
66872 }
66873diff --git a/lib/extable.c b/lib/extable.c
66874index 4cac81e..63e9b8f 100644
66875--- a/lib/extable.c
66876+++ b/lib/extable.c
66877@@ -13,6 +13,7 @@
66878 #include <linux/init.h>
66879 #include <linux/sort.h>
66880 #include <asm/uaccess.h>
66881+#include <asm/pgtable.h>
66882
66883 #ifndef ARCH_HAS_SORT_EXTABLE
66884 /*
66885@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
66886 void sort_extable(struct exception_table_entry *start,
66887 struct exception_table_entry *finish)
66888 {
66889+ pax_open_kernel();
66890 sort(start, finish - start, sizeof(struct exception_table_entry),
66891 cmp_ex, NULL);
66892+ pax_close_kernel();
66893 }
66894
66895 #ifdef CONFIG_MODULES
66896diff --git a/lib/inflate.c b/lib/inflate.c
66897index 013a761..c28f3fc 100644
66898--- a/lib/inflate.c
66899+++ b/lib/inflate.c
66900@@ -269,7 +269,7 @@ static void free(void *where)
66901 malloc_ptr = free_mem_ptr;
66902 }
66903 #else
66904-#define malloc(a) kmalloc(a, GFP_KERNEL)
66905+#define malloc(a) kmalloc((a), GFP_KERNEL)
66906 #define free(a) kfree(a)
66907 #endif
66908
66909diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
66910index bd2bea9..6b3c95e 100644
66911--- a/lib/is_single_threaded.c
66912+++ b/lib/is_single_threaded.c
66913@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
66914 struct task_struct *p, *t;
66915 bool ret;
66916
66917+ if (!mm)
66918+ return true;
66919+
66920 if (atomic_read(&task->signal->live) != 1)
66921 return false;
66922
66923diff --git a/lib/kref.c b/lib/kref.c
66924index 3efb882..8492f4c 100644
66925--- a/lib/kref.c
66926+++ b/lib/kref.c
66927@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66928 */
66929 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66930 {
66931- WARN_ON(release == NULL);
66932+ BUG_ON(release == NULL);
66933 WARN_ON(release == (void (*)(struct kref *))kfree);
66934
66935 if (atomic_dec_and_test(&kref->refcount)) {
66936diff --git a/lib/radix-tree.c b/lib/radix-tree.c
66937index d9df745..e73c2fe 100644
66938--- a/lib/radix-tree.c
66939+++ b/lib/radix-tree.c
66940@@ -80,7 +80,7 @@ struct radix_tree_preload {
66941 int nr;
66942 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66943 };
66944-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66945+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66946
66947 static inline void *ptr_to_indirect(void *ptr)
66948 {
66949diff --git a/lib/vsprintf.c b/lib/vsprintf.c
66950index 993599e..84dc70e 100644
66951--- a/lib/vsprintf.c
66952+++ b/lib/vsprintf.c
66953@@ -16,6 +16,9 @@
66954 * - scnprintf and vscnprintf
66955 */
66956
66957+#ifdef CONFIG_GRKERNSEC_HIDESYM
66958+#define __INCLUDED_BY_HIDESYM 1
66959+#endif
66960 #include <stdarg.h>
66961 #include <linux/module.h>
66962 #include <linux/types.h>
66963@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
66964 char sym[KSYM_SYMBOL_LEN];
66965 if (ext == 'B')
66966 sprint_backtrace(sym, value);
66967- else if (ext != 'f' && ext != 's')
66968+ else if (ext != 'f' && ext != 's' && ext != 'a')
66969 sprint_symbol(sym, value);
66970 else
66971 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66972@@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
66973 return string(buf, end, uuid, spec);
66974 }
66975
66976+#ifdef CONFIG_GRKERNSEC_HIDESYM
66977+int kptr_restrict __read_mostly = 2;
66978+#else
66979 int kptr_restrict __read_mostly;
66980+#endif
66981
66982 /*
66983 * Show a '%p' thing. A kernel extension is that the '%p' is followed
66984@@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
66985 * - 'S' For symbolic direct pointers with offset
66986 * - 's' For symbolic direct pointers without offset
66987 * - 'B' For backtraced symbolic direct pointers with offset
66988+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66989+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66990 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66991 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66992 * - 'M' For a 6-byte MAC address, it prints the address in the
66993@@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
66994 {
66995 if (!ptr && *fmt != 'K') {
66996 /*
66997- * Print (null) with the same width as a pointer so it makes
66998+ * Print (nil) with the same width as a pointer so it makes
66999 * tabular output look nice.
67000 */
67001 if (spec.field_width == -1)
67002 spec.field_width = 2 * sizeof(void *);
67003- return string(buf, end, "(null)", spec);
67004+ return string(buf, end, "(nil)", spec);
67005 }
67006
67007 switch (*fmt) {
67008@@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67009 /* Fallthrough */
67010 case 'S':
67011 case 's':
67012+#ifdef CONFIG_GRKERNSEC_HIDESYM
67013+ break;
67014+#else
67015+ return symbol_string(buf, end, ptr, spec, *fmt);
67016+#endif
67017+ case 'A':
67018+ case 'a':
67019 case 'B':
67020 return symbol_string(buf, end, ptr, spec, *fmt);
67021 case 'R':
67022@@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67023 typeof(type) value; \
67024 if (sizeof(type) == 8) { \
67025 args = PTR_ALIGN(args, sizeof(u32)); \
67026- *(u32 *)&value = *(u32 *)args; \
67027- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67028+ *(u32 *)&value = *(const u32 *)args; \
67029+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67030 } else { \
67031 args = PTR_ALIGN(args, sizeof(type)); \
67032- value = *(typeof(type) *)args; \
67033+ value = *(const typeof(type) *)args; \
67034 } \
67035 args += sizeof(type); \
67036 value; \
67037@@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67038 case FORMAT_TYPE_STR: {
67039 const char *str_arg = args;
67040 args += strlen(str_arg) + 1;
67041- str = string(str, end, (char *)str_arg, spec);
67042+ str = string(str, end, str_arg, spec);
67043 break;
67044 }
67045
67046diff --git a/localversion-grsec b/localversion-grsec
67047new file mode 100644
67048index 0000000..7cd6065
67049--- /dev/null
67050+++ b/localversion-grsec
67051@@ -0,0 +1 @@
67052+-grsec
67053diff --git a/mm/Kconfig b/mm/Kconfig
67054index 011b110..b492af2 100644
67055--- a/mm/Kconfig
67056+++ b/mm/Kconfig
67057@@ -241,10 +241,10 @@ config KSM
67058 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67059
67060 config DEFAULT_MMAP_MIN_ADDR
67061- int "Low address space to protect from user allocation"
67062+ int "Low address space to protect from user allocation"
67063 depends on MMU
67064- default 4096
67065- help
67066+ default 65536
67067+ help
67068 This is the portion of low virtual memory which should be protected
67069 from userspace allocation. Keeping a user from writing to low pages
67070 can help reduce the impact of kernel NULL pointer bugs.
67071diff --git a/mm/filemap.c b/mm/filemap.c
67072index 90286a4..f441caa 100644
67073--- a/mm/filemap.c
67074+++ b/mm/filemap.c
67075@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67076 struct address_space *mapping = file->f_mapping;
67077
67078 if (!mapping->a_ops->readpage)
67079- return -ENOEXEC;
67080+ return -ENODEV;
67081 file_accessed(file);
67082 vma->vm_ops = &generic_file_vm_ops;
67083 vma->vm_flags |= VM_CAN_NONLINEAR;
67084@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67085 *pos = i_size_read(inode);
67086
67087 if (limit != RLIM_INFINITY) {
67088+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67089 if (*pos >= limit) {
67090 send_sig(SIGXFSZ, current, 0);
67091 return -EFBIG;
67092diff --git a/mm/fremap.c b/mm/fremap.c
67093index 9ed4fd4..c42648d 100644
67094--- a/mm/fremap.c
67095+++ b/mm/fremap.c
67096@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67097 retry:
67098 vma = find_vma(mm, start);
67099
67100+#ifdef CONFIG_PAX_SEGMEXEC
67101+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67102+ goto out;
67103+#endif
67104+
67105 /*
67106 * Make sure the vma is shared, that it supports prefaulting,
67107 * and that the remapped range is valid and fully within
67108diff --git a/mm/highmem.c b/mm/highmem.c
67109index 57d82c6..e9e0552 100644
67110--- a/mm/highmem.c
67111+++ b/mm/highmem.c
67112@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67113 * So no dangers, even with speculative execution.
67114 */
67115 page = pte_page(pkmap_page_table[i]);
67116+ pax_open_kernel();
67117 pte_clear(&init_mm, (unsigned long)page_address(page),
67118 &pkmap_page_table[i]);
67119-
67120+ pax_close_kernel();
67121 set_page_address(page, NULL);
67122 need_flush = 1;
67123 }
67124@@ -186,9 +187,11 @@ start:
67125 }
67126 }
67127 vaddr = PKMAP_ADDR(last_pkmap_nr);
67128+
67129+ pax_open_kernel();
67130 set_pte_at(&init_mm, vaddr,
67131 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67132-
67133+ pax_close_kernel();
67134 pkmap_count[last_pkmap_nr] = 1;
67135 set_page_address(page, (void *)vaddr);
67136
67137diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67138index 36b3d98..584cb54 100644
67139--- a/mm/huge_memory.c
67140+++ b/mm/huge_memory.c
67141@@ -703,7 +703,7 @@ out:
67142 * run pte_offset_map on the pmd, if an huge pmd could
67143 * materialize from under us from a different thread.
67144 */
67145- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67146+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67147 return VM_FAULT_OOM;
67148 /* if an huge pmd materialized from under us just retry later */
67149 if (unlikely(pmd_trans_huge(*pmd)))
67150diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67151index 2316840..b418671 100644
67152--- a/mm/hugetlb.c
67153+++ b/mm/hugetlb.c
67154@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67155 return 1;
67156 }
67157
67158+#ifdef CONFIG_PAX_SEGMEXEC
67159+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67160+{
67161+ struct mm_struct *mm = vma->vm_mm;
67162+ struct vm_area_struct *vma_m;
67163+ unsigned long address_m;
67164+ pte_t *ptep_m;
67165+
67166+ vma_m = pax_find_mirror_vma(vma);
67167+ if (!vma_m)
67168+ return;
67169+
67170+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67171+ address_m = address + SEGMEXEC_TASK_SIZE;
67172+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67173+ get_page(page_m);
67174+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67175+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67176+}
67177+#endif
67178+
67179 /*
67180 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67181 */
67182@@ -2450,6 +2471,11 @@ retry_avoidcopy:
67183 make_huge_pte(vma, new_page, 1));
67184 page_remove_rmap(old_page);
67185 hugepage_add_new_anon_rmap(new_page, vma, address);
67186+
67187+#ifdef CONFIG_PAX_SEGMEXEC
67188+ pax_mirror_huge_pte(vma, address, new_page);
67189+#endif
67190+
67191 /* Make the old page be freed below */
67192 new_page = old_page;
67193 mmu_notifier_invalidate_range_end(mm,
67194@@ -2601,6 +2627,10 @@ retry:
67195 && (vma->vm_flags & VM_SHARED)));
67196 set_huge_pte_at(mm, address, ptep, new_pte);
67197
67198+#ifdef CONFIG_PAX_SEGMEXEC
67199+ pax_mirror_huge_pte(vma, address, page);
67200+#endif
67201+
67202 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67203 /* Optimization, do the COW without a second fault */
67204 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67205@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67206 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67207 struct hstate *h = hstate_vma(vma);
67208
67209+#ifdef CONFIG_PAX_SEGMEXEC
67210+ struct vm_area_struct *vma_m;
67211+#endif
67212+
67213 ptep = huge_pte_offset(mm, address);
67214 if (ptep) {
67215 entry = huge_ptep_get(ptep);
67216@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67217 VM_FAULT_SET_HINDEX(h - hstates);
67218 }
67219
67220+#ifdef CONFIG_PAX_SEGMEXEC
67221+ vma_m = pax_find_mirror_vma(vma);
67222+ if (vma_m) {
67223+ unsigned long address_m;
67224+
67225+ if (vma->vm_start > vma_m->vm_start) {
67226+ address_m = address;
67227+ address -= SEGMEXEC_TASK_SIZE;
67228+ vma = vma_m;
67229+ h = hstate_vma(vma);
67230+ } else
67231+ address_m = address + SEGMEXEC_TASK_SIZE;
67232+
67233+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67234+ return VM_FAULT_OOM;
67235+ address_m &= HPAGE_MASK;
67236+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67237+ }
67238+#endif
67239+
67240 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67241 if (!ptep)
67242 return VM_FAULT_OOM;
67243diff --git a/mm/internal.h b/mm/internal.h
67244index 2189af4..f2ca332 100644
67245--- a/mm/internal.h
67246+++ b/mm/internal.h
67247@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67248 * in mm/page_alloc.c
67249 */
67250 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67251+extern void free_compound_page(struct page *page);
67252 extern void prep_compound_page(struct page *page, unsigned long order);
67253 #ifdef CONFIG_MEMORY_FAILURE
67254 extern bool is_free_buddy_page(struct page *page);
67255diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67256index f3b2a00..61da94d 100644
67257--- a/mm/kmemleak.c
67258+++ b/mm/kmemleak.c
67259@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67260
67261 for (i = 0; i < object->trace_len; i++) {
67262 void *ptr = (void *)object->trace[i];
67263- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67264+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67265 }
67266 }
67267
67268diff --git a/mm/maccess.c b/mm/maccess.c
67269index d53adf9..03a24bf 100644
67270--- a/mm/maccess.c
67271+++ b/mm/maccess.c
67272@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67273 set_fs(KERNEL_DS);
67274 pagefault_disable();
67275 ret = __copy_from_user_inatomic(dst,
67276- (__force const void __user *)src, size);
67277+ (const void __force_user *)src, size);
67278 pagefault_enable();
67279 set_fs(old_fs);
67280
67281@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67282
67283 set_fs(KERNEL_DS);
67284 pagefault_disable();
67285- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67286+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67287 pagefault_enable();
67288 set_fs(old_fs);
67289
67290diff --git a/mm/madvise.c b/mm/madvise.c
67291index 74bf193..feb6fd3 100644
67292--- a/mm/madvise.c
67293+++ b/mm/madvise.c
67294@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67295 pgoff_t pgoff;
67296 unsigned long new_flags = vma->vm_flags;
67297
67298+#ifdef CONFIG_PAX_SEGMEXEC
67299+ struct vm_area_struct *vma_m;
67300+#endif
67301+
67302 switch (behavior) {
67303 case MADV_NORMAL:
67304 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67305@@ -110,6 +114,13 @@ success:
67306 /*
67307 * vm_flags is protected by the mmap_sem held in write mode.
67308 */
67309+
67310+#ifdef CONFIG_PAX_SEGMEXEC
67311+ vma_m = pax_find_mirror_vma(vma);
67312+ if (vma_m)
67313+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67314+#endif
67315+
67316 vma->vm_flags = new_flags;
67317
67318 out:
67319@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67320 struct vm_area_struct ** prev,
67321 unsigned long start, unsigned long end)
67322 {
67323+
67324+#ifdef CONFIG_PAX_SEGMEXEC
67325+ struct vm_area_struct *vma_m;
67326+#endif
67327+
67328 *prev = vma;
67329 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67330 return -EINVAL;
67331@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67332 zap_page_range(vma, start, end - start, &details);
67333 } else
67334 zap_page_range(vma, start, end - start, NULL);
67335+
67336+#ifdef CONFIG_PAX_SEGMEXEC
67337+ vma_m = pax_find_mirror_vma(vma);
67338+ if (vma_m) {
67339+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67340+ struct zap_details details = {
67341+ .nonlinear_vma = vma_m,
67342+ .last_index = ULONG_MAX,
67343+ };
67344+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67345+ } else
67346+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67347+ }
67348+#endif
67349+
67350 return 0;
67351 }
67352
67353@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67354 if (end < start)
67355 goto out;
67356
67357+#ifdef CONFIG_PAX_SEGMEXEC
67358+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67359+ if (end > SEGMEXEC_TASK_SIZE)
67360+ goto out;
67361+ } else
67362+#endif
67363+
67364+ if (end > TASK_SIZE)
67365+ goto out;
67366+
67367 error = 0;
67368 if (end == start)
67369 goto out;
67370diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67371index 06d3479..0778eef 100644
67372--- a/mm/memory-failure.c
67373+++ b/mm/memory-failure.c
67374@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67375
67376 int sysctl_memory_failure_recovery __read_mostly = 1;
67377
67378-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67379+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67380
67381 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67382
67383@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67384 si.si_signo = SIGBUS;
67385 si.si_errno = 0;
67386 si.si_code = BUS_MCEERR_AO;
67387- si.si_addr = (void *)addr;
67388+ si.si_addr = (void __user *)addr;
67389 #ifdef __ARCH_SI_TRAPNO
67390 si.si_trapno = trapno;
67391 #endif
67392@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67393 }
67394
67395 nr_pages = 1 << compound_trans_order(hpage);
67396- atomic_long_add(nr_pages, &mce_bad_pages);
67397+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67398
67399 /*
67400 * We need/can do nothing about count=0 pages.
67401@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67402 if (!PageHWPoison(hpage)
67403 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67404 || (p != hpage && TestSetPageHWPoison(hpage))) {
67405- atomic_long_sub(nr_pages, &mce_bad_pages);
67406+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67407 return 0;
67408 }
67409 set_page_hwpoison_huge_page(hpage);
67410@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67411 }
67412 if (hwpoison_filter(p)) {
67413 if (TestClearPageHWPoison(p))
67414- atomic_long_sub(nr_pages, &mce_bad_pages);
67415+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67416 unlock_page(hpage);
67417 put_page(hpage);
67418 return 0;
67419@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67420 return 0;
67421 }
67422 if (TestClearPageHWPoison(p))
67423- atomic_long_sub(nr_pages, &mce_bad_pages);
67424+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67425 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67426 return 0;
67427 }
67428@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67429 */
67430 if (TestClearPageHWPoison(page)) {
67431 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67432- atomic_long_sub(nr_pages, &mce_bad_pages);
67433+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67434 freeit = 1;
67435 if (PageHuge(page))
67436 clear_page_hwpoison_huge_page(page);
67437@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67438 }
67439 done:
67440 if (!PageHWPoison(hpage))
67441- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67442+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67443 set_page_hwpoison_huge_page(hpage);
67444 dequeue_hwpoisoned_huge_page(hpage);
67445 /* keep elevated page count for bad page */
67446@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67447 return ret;
67448
67449 done:
67450- atomic_long_add(1, &mce_bad_pages);
67451+ atomic_long_add_unchecked(1, &mce_bad_pages);
67452 SetPageHWPoison(page);
67453 /* keep elevated page count for bad page */
67454 return ret;
67455diff --git a/mm/memory.c b/mm/memory.c
67456index 829d437..3d3926a 100644
67457--- a/mm/memory.c
67458+++ b/mm/memory.c
67459@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67460 return;
67461
67462 pmd = pmd_offset(pud, start);
67463+
67464+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67465 pud_clear(pud);
67466 pmd_free_tlb(tlb, pmd, start);
67467+#endif
67468+
67469 }
67470
67471 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67472@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67473 if (end - 1 > ceiling - 1)
67474 return;
67475
67476+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67477 pud = pud_offset(pgd, start);
67478 pgd_clear(pgd);
67479 pud_free_tlb(tlb, pud, start);
67480+#endif
67481+
67482 }
67483
67484 /*
67485@@ -1566,12 +1573,6 @@ no_page_table:
67486 return page;
67487 }
67488
67489-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67490-{
67491- return stack_guard_page_start(vma, addr) ||
67492- stack_guard_page_end(vma, addr+PAGE_SIZE);
67493-}
67494-
67495 /**
67496 * __get_user_pages() - pin user pages in memory
67497 * @tsk: task_struct of target task
67498@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67499 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67500 i = 0;
67501
67502- do {
67503+ while (nr_pages) {
67504 struct vm_area_struct *vma;
67505
67506- vma = find_extend_vma(mm, start);
67507+ vma = find_vma(mm, start);
67508 if (!vma && in_gate_area(mm, start)) {
67509 unsigned long pg = start & PAGE_MASK;
67510 pgd_t *pgd;
67511@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67512 goto next_page;
67513 }
67514
67515- if (!vma ||
67516+ if (!vma || start < vma->vm_start ||
67517 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67518 !(vm_flags & vma->vm_flags))
67519 return i ? : -EFAULT;
67520@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67521 int ret;
67522 unsigned int fault_flags = 0;
67523
67524- /* For mlock, just skip the stack guard page. */
67525- if (foll_flags & FOLL_MLOCK) {
67526- if (stack_guard_page(vma, start))
67527- goto next_page;
67528- }
67529 if (foll_flags & FOLL_WRITE)
67530 fault_flags |= FAULT_FLAG_WRITE;
67531 if (nonblocking)
67532@@ -1800,7 +1796,7 @@ next_page:
67533 start += PAGE_SIZE;
67534 nr_pages--;
67535 } while (nr_pages && start < vma->vm_end);
67536- } while (nr_pages);
67537+ }
67538 return i;
67539 }
67540 EXPORT_SYMBOL(__get_user_pages);
67541@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
67542 page_add_file_rmap(page);
67543 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67544
67545+#ifdef CONFIG_PAX_SEGMEXEC
67546+ pax_mirror_file_pte(vma, addr, page, ptl);
67547+#endif
67548+
67549 retval = 0;
67550 pte_unmap_unlock(pte, ptl);
67551 return retval;
67552@@ -2041,10 +2041,22 @@ out:
67553 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67554 struct page *page)
67555 {
67556+
67557+#ifdef CONFIG_PAX_SEGMEXEC
67558+ struct vm_area_struct *vma_m;
67559+#endif
67560+
67561 if (addr < vma->vm_start || addr >= vma->vm_end)
67562 return -EFAULT;
67563 if (!page_count(page))
67564 return -EINVAL;
67565+
67566+#ifdef CONFIG_PAX_SEGMEXEC
67567+ vma_m = pax_find_mirror_vma(vma);
67568+ if (vma_m)
67569+ vma_m->vm_flags |= VM_INSERTPAGE;
67570+#endif
67571+
67572 vma->vm_flags |= VM_INSERTPAGE;
67573 return insert_page(vma, addr, page, vma->vm_page_prot);
67574 }
67575@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
67576 unsigned long pfn)
67577 {
67578 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67579+ BUG_ON(vma->vm_mirror);
67580
67581 if (addr < vma->vm_start || addr >= vma->vm_end)
67582 return -EFAULT;
67583@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
67584 copy_user_highpage(dst, src, va, vma);
67585 }
67586
67587+#ifdef CONFIG_PAX_SEGMEXEC
67588+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67589+{
67590+ struct mm_struct *mm = vma->vm_mm;
67591+ spinlock_t *ptl;
67592+ pte_t *pte, entry;
67593+
67594+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67595+ entry = *pte;
67596+ if (!pte_present(entry)) {
67597+ if (!pte_none(entry)) {
67598+ BUG_ON(pte_file(entry));
67599+ free_swap_and_cache(pte_to_swp_entry(entry));
67600+ pte_clear_not_present_full(mm, address, pte, 0);
67601+ }
67602+ } else {
67603+ struct page *page;
67604+
67605+ flush_cache_page(vma, address, pte_pfn(entry));
67606+ entry = ptep_clear_flush(vma, address, pte);
67607+ BUG_ON(pte_dirty(entry));
67608+ page = vm_normal_page(vma, address, entry);
67609+ if (page) {
67610+ update_hiwater_rss(mm);
67611+ if (PageAnon(page))
67612+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67613+ else
67614+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67615+ page_remove_rmap(page);
67616+ page_cache_release(page);
67617+ }
67618+ }
67619+ pte_unmap_unlock(pte, ptl);
67620+}
67621+
67622+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67623+ *
67624+ * the ptl of the lower mapped page is held on entry and is not released on exit
67625+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67626+ */
67627+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67628+{
67629+ struct mm_struct *mm = vma->vm_mm;
67630+ unsigned long address_m;
67631+ spinlock_t *ptl_m;
67632+ struct vm_area_struct *vma_m;
67633+ pmd_t *pmd_m;
67634+ pte_t *pte_m, entry_m;
67635+
67636+ BUG_ON(!page_m || !PageAnon(page_m));
67637+
67638+ vma_m = pax_find_mirror_vma(vma);
67639+ if (!vma_m)
67640+ return;
67641+
67642+ BUG_ON(!PageLocked(page_m));
67643+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67644+ address_m = address + SEGMEXEC_TASK_SIZE;
67645+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67646+ pte_m = pte_offset_map(pmd_m, address_m);
67647+ ptl_m = pte_lockptr(mm, pmd_m);
67648+ if (ptl != ptl_m) {
67649+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67650+ if (!pte_none(*pte_m))
67651+ goto out;
67652+ }
67653+
67654+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67655+ page_cache_get(page_m);
67656+ page_add_anon_rmap(page_m, vma_m, address_m);
67657+ inc_mm_counter_fast(mm, MM_ANONPAGES);
67658+ set_pte_at(mm, address_m, pte_m, entry_m);
67659+ update_mmu_cache(vma_m, address_m, entry_m);
67660+out:
67661+ if (ptl != ptl_m)
67662+ spin_unlock(ptl_m);
67663+ pte_unmap(pte_m);
67664+ unlock_page(page_m);
67665+}
67666+
67667+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67668+{
67669+ struct mm_struct *mm = vma->vm_mm;
67670+ unsigned long address_m;
67671+ spinlock_t *ptl_m;
67672+ struct vm_area_struct *vma_m;
67673+ pmd_t *pmd_m;
67674+ pte_t *pte_m, entry_m;
67675+
67676+ BUG_ON(!page_m || PageAnon(page_m));
67677+
67678+ vma_m = pax_find_mirror_vma(vma);
67679+ if (!vma_m)
67680+ return;
67681+
67682+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67683+ address_m = address + SEGMEXEC_TASK_SIZE;
67684+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67685+ pte_m = pte_offset_map(pmd_m, address_m);
67686+ ptl_m = pte_lockptr(mm, pmd_m);
67687+ if (ptl != ptl_m) {
67688+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67689+ if (!pte_none(*pte_m))
67690+ goto out;
67691+ }
67692+
67693+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67694+ page_cache_get(page_m);
67695+ page_add_file_rmap(page_m);
67696+ inc_mm_counter_fast(mm, MM_FILEPAGES);
67697+ set_pte_at(mm, address_m, pte_m, entry_m);
67698+ update_mmu_cache(vma_m, address_m, entry_m);
67699+out:
67700+ if (ptl != ptl_m)
67701+ spin_unlock(ptl_m);
67702+ pte_unmap(pte_m);
67703+}
67704+
67705+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67706+{
67707+ struct mm_struct *mm = vma->vm_mm;
67708+ unsigned long address_m;
67709+ spinlock_t *ptl_m;
67710+ struct vm_area_struct *vma_m;
67711+ pmd_t *pmd_m;
67712+ pte_t *pte_m, entry_m;
67713+
67714+ vma_m = pax_find_mirror_vma(vma);
67715+ if (!vma_m)
67716+ return;
67717+
67718+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67719+ address_m = address + SEGMEXEC_TASK_SIZE;
67720+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67721+ pte_m = pte_offset_map(pmd_m, address_m);
67722+ ptl_m = pte_lockptr(mm, pmd_m);
67723+ if (ptl != ptl_m) {
67724+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67725+ if (!pte_none(*pte_m))
67726+ goto out;
67727+ }
67728+
67729+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67730+ set_pte_at(mm, address_m, pte_m, entry_m);
67731+out:
67732+ if (ptl != ptl_m)
67733+ spin_unlock(ptl_m);
67734+ pte_unmap(pte_m);
67735+}
67736+
67737+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67738+{
67739+ struct page *page_m;
67740+ pte_t entry;
67741+
67742+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67743+ goto out;
67744+
67745+ entry = *pte;
67746+ page_m = vm_normal_page(vma, address, entry);
67747+ if (!page_m)
67748+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67749+ else if (PageAnon(page_m)) {
67750+ if (pax_find_mirror_vma(vma)) {
67751+ pte_unmap_unlock(pte, ptl);
67752+ lock_page(page_m);
67753+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67754+ if (pte_same(entry, *pte))
67755+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67756+ else
67757+ unlock_page(page_m);
67758+ }
67759+ } else
67760+ pax_mirror_file_pte(vma, address, page_m, ptl);
67761+
67762+out:
67763+ pte_unmap_unlock(pte, ptl);
67764+}
67765+#endif
67766+
67767 /*
67768 * This routine handles present pages, when users try to write
67769 * to a shared page. It is done by copying the page to a new address
67770@@ -2656,6 +2849,12 @@ gotten:
67771 */
67772 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67773 if (likely(pte_same(*page_table, orig_pte))) {
67774+
67775+#ifdef CONFIG_PAX_SEGMEXEC
67776+ if (pax_find_mirror_vma(vma))
67777+ BUG_ON(!trylock_page(new_page));
67778+#endif
67779+
67780 if (old_page) {
67781 if (!PageAnon(old_page)) {
67782 dec_mm_counter_fast(mm, MM_FILEPAGES);
67783@@ -2707,6 +2906,10 @@ gotten:
67784 page_remove_rmap(old_page);
67785 }
67786
67787+#ifdef CONFIG_PAX_SEGMEXEC
67788+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67789+#endif
67790+
67791 /* Free the old page.. */
67792 new_page = old_page;
67793 ret |= VM_FAULT_WRITE;
67794@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
67795 swap_free(entry);
67796 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67797 try_to_free_swap(page);
67798+
67799+#ifdef CONFIG_PAX_SEGMEXEC
67800+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67801+#endif
67802+
67803 unlock_page(page);
67804 if (swapcache) {
67805 /*
67806@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
67807
67808 /* No need to invalidate - it was non-present before */
67809 update_mmu_cache(vma, address, page_table);
67810+
67811+#ifdef CONFIG_PAX_SEGMEXEC
67812+ pax_mirror_anon_pte(vma, address, page, ptl);
67813+#endif
67814+
67815 unlock:
67816 pte_unmap_unlock(page_table, ptl);
67817 out:
67818@@ -3028,40 +3241,6 @@ out_release:
67819 }
67820
67821 /*
67822- * This is like a special single-page "expand_{down|up}wards()",
67823- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67824- * doesn't hit another vma.
67825- */
67826-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67827-{
67828- address &= PAGE_MASK;
67829- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67830- struct vm_area_struct *prev = vma->vm_prev;
67831-
67832- /*
67833- * Is there a mapping abutting this one below?
67834- *
67835- * That's only ok if it's the same stack mapping
67836- * that has gotten split..
67837- */
67838- if (prev && prev->vm_end == address)
67839- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67840-
67841- expand_downwards(vma, address - PAGE_SIZE);
67842- }
67843- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67844- struct vm_area_struct *next = vma->vm_next;
67845-
67846- /* As VM_GROWSDOWN but s/below/above/ */
67847- if (next && next->vm_start == address + PAGE_SIZE)
67848- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67849-
67850- expand_upwards(vma, address + PAGE_SIZE);
67851- }
67852- return 0;
67853-}
67854-
67855-/*
67856 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67857 * but allow concurrent faults), and pte mapped but not yet locked.
67858 * We return with mmap_sem still held, but pte unmapped and unlocked.
67859@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
67860 unsigned long address, pte_t *page_table, pmd_t *pmd,
67861 unsigned int flags)
67862 {
67863- struct page *page;
67864+ struct page *page = NULL;
67865 spinlock_t *ptl;
67866 pte_t entry;
67867
67868- pte_unmap(page_table);
67869-
67870- /* Check if we need to add a guard page to the stack */
67871- if (check_stack_guard_page(vma, address) < 0)
67872- return VM_FAULT_SIGBUS;
67873-
67874- /* Use the zero-page for reads */
67875 if (!(flags & FAULT_FLAG_WRITE)) {
67876 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67877 vma->vm_page_prot));
67878- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67879+ ptl = pte_lockptr(mm, pmd);
67880+ spin_lock(ptl);
67881 if (!pte_none(*page_table))
67882 goto unlock;
67883 goto setpte;
67884 }
67885
67886 /* Allocate our own private page. */
67887+ pte_unmap(page_table);
67888+
67889 if (unlikely(anon_vma_prepare(vma)))
67890 goto oom;
67891 page = alloc_zeroed_user_highpage_movable(vma, address);
67892@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
67893 if (!pte_none(*page_table))
67894 goto release;
67895
67896+#ifdef CONFIG_PAX_SEGMEXEC
67897+ if (pax_find_mirror_vma(vma))
67898+ BUG_ON(!trylock_page(page));
67899+#endif
67900+
67901 inc_mm_counter_fast(mm, MM_ANONPAGES);
67902 page_add_new_anon_rmap(page, vma, address);
67903 setpte:
67904@@ -3116,6 +3296,12 @@ setpte:
67905
67906 /* No need to invalidate - it was non-present before */
67907 update_mmu_cache(vma, address, page_table);
67908+
67909+#ifdef CONFIG_PAX_SEGMEXEC
67910+ if (page)
67911+ pax_mirror_anon_pte(vma, address, page, ptl);
67912+#endif
67913+
67914 unlock:
67915 pte_unmap_unlock(page_table, ptl);
67916 return 0;
67917@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67918 */
67919 /* Only go through if we didn't race with anybody else... */
67920 if (likely(pte_same(*page_table, orig_pte))) {
67921+
67922+#ifdef CONFIG_PAX_SEGMEXEC
67923+ if (anon && pax_find_mirror_vma(vma))
67924+ BUG_ON(!trylock_page(page));
67925+#endif
67926+
67927 flush_icache_page(vma, page);
67928 entry = mk_pte(page, vma->vm_page_prot);
67929 if (flags & FAULT_FLAG_WRITE)
67930@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67931
67932 /* no need to invalidate: a not-present page won't be cached */
67933 update_mmu_cache(vma, address, page_table);
67934+
67935+#ifdef CONFIG_PAX_SEGMEXEC
67936+ if (anon)
67937+ pax_mirror_anon_pte(vma, address, page, ptl);
67938+ else
67939+ pax_mirror_file_pte(vma, address, page, ptl);
67940+#endif
67941+
67942 } else {
67943 if (cow_page)
67944 mem_cgroup_uncharge_page(cow_page);
67945@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
67946 if (flags & FAULT_FLAG_WRITE)
67947 flush_tlb_fix_spurious_fault(vma, address);
67948 }
67949+
67950+#ifdef CONFIG_PAX_SEGMEXEC
67951+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67952+ return 0;
67953+#endif
67954+
67955 unlock:
67956 pte_unmap_unlock(pte, ptl);
67957 return 0;
67958@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67959 pmd_t *pmd;
67960 pte_t *pte;
67961
67962+#ifdef CONFIG_PAX_SEGMEXEC
67963+ struct vm_area_struct *vma_m;
67964+#endif
67965+
67966 __set_current_state(TASK_RUNNING);
67967
67968 count_vm_event(PGFAULT);
67969@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67970 if (unlikely(is_vm_hugetlb_page(vma)))
67971 return hugetlb_fault(mm, vma, address, flags);
67972
67973+#ifdef CONFIG_PAX_SEGMEXEC
67974+ vma_m = pax_find_mirror_vma(vma);
67975+ if (vma_m) {
67976+ unsigned long address_m;
67977+ pgd_t *pgd_m;
67978+ pud_t *pud_m;
67979+ pmd_t *pmd_m;
67980+
67981+ if (vma->vm_start > vma_m->vm_start) {
67982+ address_m = address;
67983+ address -= SEGMEXEC_TASK_SIZE;
67984+ vma = vma_m;
67985+ } else
67986+ address_m = address + SEGMEXEC_TASK_SIZE;
67987+
67988+ pgd_m = pgd_offset(mm, address_m);
67989+ pud_m = pud_alloc(mm, pgd_m, address_m);
67990+ if (!pud_m)
67991+ return VM_FAULT_OOM;
67992+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67993+ if (!pmd_m)
67994+ return VM_FAULT_OOM;
67995+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
67996+ return VM_FAULT_OOM;
67997+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67998+ }
67999+#endif
68000+
68001 pgd = pgd_offset(mm, address);
68002 pud = pud_alloc(mm, pgd, address);
68003 if (!pud)
68004@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68005 * run pte_offset_map on the pmd, if an huge pmd could
68006 * materialize from under us from a different thread.
68007 */
68008- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68009+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68010 return VM_FAULT_OOM;
68011 /* if an huge pmd materialized from under us just retry later */
68012 if (unlikely(pmd_trans_huge(*pmd)))
68013@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68014 gate_vma.vm_start = FIXADDR_USER_START;
68015 gate_vma.vm_end = FIXADDR_USER_END;
68016 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68017- gate_vma.vm_page_prot = __P101;
68018+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68019 /*
68020 * Make sure the vDSO gets into every core dump.
68021 * Dumping its contents makes post-mortem fully interpretable later
68022diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68023index c3fdbcb..2e8ef90 100644
68024--- a/mm/mempolicy.c
68025+++ b/mm/mempolicy.c
68026@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68027 unsigned long vmstart;
68028 unsigned long vmend;
68029
68030+#ifdef CONFIG_PAX_SEGMEXEC
68031+ struct vm_area_struct *vma_m;
68032+#endif
68033+
68034 vma = find_vma_prev(mm, start, &prev);
68035 if (!vma || vma->vm_start > start)
68036 return -EFAULT;
68037@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68038 err = policy_vma(vma, new_pol);
68039 if (err)
68040 goto out;
68041+
68042+#ifdef CONFIG_PAX_SEGMEXEC
68043+ vma_m = pax_find_mirror_vma(vma);
68044+ if (vma_m) {
68045+ err = policy_vma(vma_m, new_pol);
68046+ if (err)
68047+ goto out;
68048+ }
68049+#endif
68050+
68051 }
68052
68053 out:
68054@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68055
68056 if (end < start)
68057 return -EINVAL;
68058+
68059+#ifdef CONFIG_PAX_SEGMEXEC
68060+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68061+ if (end > SEGMEXEC_TASK_SIZE)
68062+ return -EINVAL;
68063+ } else
68064+#endif
68065+
68066+ if (end > TASK_SIZE)
68067+ return -EINVAL;
68068+
68069 if (end == start)
68070 return 0;
68071
68072@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68073 if (!mm)
68074 goto out;
68075
68076+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68077+ if (mm != current->mm &&
68078+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68079+ err = -EPERM;
68080+ goto out;
68081+ }
68082+#endif
68083+
68084 /*
68085 * Check if this process has the right to modify the specified
68086 * process. The right exists if the process has administrative
68087@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68088 rcu_read_lock();
68089 tcred = __task_cred(task);
68090 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68091- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68092- !capable(CAP_SYS_NICE)) {
68093+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68094 rcu_read_unlock();
68095 err = -EPERM;
68096 goto out;
68097diff --git a/mm/migrate.c b/mm/migrate.c
68098index 177aca4..ab3a744 100644
68099--- a/mm/migrate.c
68100+++ b/mm/migrate.c
68101@@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68102 if (!mm)
68103 return -EINVAL;
68104
68105+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68106+ if (mm != current->mm &&
68107+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68108+ err = -EPERM;
68109+ goto out;
68110+ }
68111+#endif
68112+
68113 /*
68114 * Check if this process has the right to modify the specified
68115 * process. The right exists if the process has administrative
68116@@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68117 rcu_read_lock();
68118 tcred = __task_cred(task);
68119 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68120- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68121- !capable(CAP_SYS_NICE)) {
68122+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68123 rcu_read_unlock();
68124 err = -EPERM;
68125 goto out;
68126diff --git a/mm/mlock.c b/mm/mlock.c
68127index 4f4f53b..9511904 100644
68128--- a/mm/mlock.c
68129+++ b/mm/mlock.c
68130@@ -13,6 +13,7 @@
68131 #include <linux/pagemap.h>
68132 #include <linux/mempolicy.h>
68133 #include <linux/syscalls.h>
68134+#include <linux/security.h>
68135 #include <linux/sched.h>
68136 #include <linux/export.h>
68137 #include <linux/rmap.h>
68138@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68139 return -EINVAL;
68140 if (end == start)
68141 return 0;
68142+ if (end > TASK_SIZE)
68143+ return -EINVAL;
68144+
68145 vma = find_vma_prev(current->mm, start, &prev);
68146 if (!vma || vma->vm_start > start)
68147 return -ENOMEM;
68148@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68149 for (nstart = start ; ; ) {
68150 vm_flags_t newflags;
68151
68152+#ifdef CONFIG_PAX_SEGMEXEC
68153+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68154+ break;
68155+#endif
68156+
68157 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68158
68159 newflags = vma->vm_flags | VM_LOCKED;
68160@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68161 lock_limit >>= PAGE_SHIFT;
68162
68163 /* check against resource limits */
68164+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68165 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68166 error = do_mlock(start, len, 1);
68167 up_write(&current->mm->mmap_sem);
68168@@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68169 static int do_mlockall(int flags)
68170 {
68171 struct vm_area_struct * vma, * prev = NULL;
68172- unsigned int def_flags = 0;
68173
68174 if (flags & MCL_FUTURE)
68175- def_flags = VM_LOCKED;
68176- current->mm->def_flags = def_flags;
68177+ current->mm->def_flags |= VM_LOCKED;
68178+ else
68179+ current->mm->def_flags &= ~VM_LOCKED;
68180 if (flags == MCL_FUTURE)
68181 goto out;
68182
68183 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68184 vm_flags_t newflags;
68185
68186+#ifdef CONFIG_PAX_SEGMEXEC
68187+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68188+ break;
68189+#endif
68190+
68191+ BUG_ON(vma->vm_end > TASK_SIZE);
68192 newflags = vma->vm_flags | VM_LOCKED;
68193 if (!(flags & MCL_CURRENT))
68194 newflags &= ~VM_LOCKED;
68195@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68196 lock_limit >>= PAGE_SHIFT;
68197
68198 ret = -ENOMEM;
68199+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68200 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68201 capable(CAP_IPC_LOCK))
68202 ret = do_mlockall(flags);
68203diff --git a/mm/mmap.c b/mm/mmap.c
68204index eae90af..51ca80b 100644
68205--- a/mm/mmap.c
68206+++ b/mm/mmap.c
68207@@ -46,6 +46,16 @@
68208 #define arch_rebalance_pgtables(addr, len) (addr)
68209 #endif
68210
68211+static inline void verify_mm_writelocked(struct mm_struct *mm)
68212+{
68213+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68214+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68215+ up_read(&mm->mmap_sem);
68216+ BUG();
68217+ }
68218+#endif
68219+}
68220+
68221 static void unmap_region(struct mm_struct *mm,
68222 struct vm_area_struct *vma, struct vm_area_struct *prev,
68223 unsigned long start, unsigned long end);
68224@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68225 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68226 *
68227 */
68228-pgprot_t protection_map[16] = {
68229+pgprot_t protection_map[16] __read_only = {
68230 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68231 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68232 };
68233
68234-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68235+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68236 {
68237- return __pgprot(pgprot_val(protection_map[vm_flags &
68238+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68239 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68240 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68241+
68242+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68243+ if (!(__supported_pte_mask & _PAGE_NX) &&
68244+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68245+ (vm_flags & (VM_READ | VM_WRITE)))
68246+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68247+#endif
68248+
68249+ return prot;
68250 }
68251 EXPORT_SYMBOL(vm_get_page_prot);
68252
68253 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68254 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68255 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68256+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68257 /*
68258 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68259 * other variables. It can be updated by several CPUs frequently.
68260@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68261 struct vm_area_struct *next = vma->vm_next;
68262
68263 might_sleep();
68264+ BUG_ON(vma->vm_mirror);
68265 if (vma->vm_ops && vma->vm_ops->close)
68266 vma->vm_ops->close(vma);
68267 if (vma->vm_file) {
68268@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68269 * not page aligned -Ram Gupta
68270 */
68271 rlim = rlimit(RLIMIT_DATA);
68272+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68273 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68274 (mm->end_data - mm->start_data) > rlim)
68275 goto out;
68276@@ -689,6 +711,12 @@ static int
68277 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68278 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68279 {
68280+
68281+#ifdef CONFIG_PAX_SEGMEXEC
68282+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68283+ return 0;
68284+#endif
68285+
68286 if (is_mergeable_vma(vma, file, vm_flags) &&
68287 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68288 if (vma->vm_pgoff == vm_pgoff)
68289@@ -708,6 +736,12 @@ static int
68290 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68291 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68292 {
68293+
68294+#ifdef CONFIG_PAX_SEGMEXEC
68295+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68296+ return 0;
68297+#endif
68298+
68299 if (is_mergeable_vma(vma, file, vm_flags) &&
68300 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68301 pgoff_t vm_pglen;
68302@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68303 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68304 struct vm_area_struct *prev, unsigned long addr,
68305 unsigned long end, unsigned long vm_flags,
68306- struct anon_vma *anon_vma, struct file *file,
68307+ struct anon_vma *anon_vma, struct file *file,
68308 pgoff_t pgoff, struct mempolicy *policy)
68309 {
68310 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68311 struct vm_area_struct *area, *next;
68312 int err;
68313
68314+#ifdef CONFIG_PAX_SEGMEXEC
68315+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68316+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68317+
68318+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68319+#endif
68320+
68321 /*
68322 * We later require that vma->vm_flags == vm_flags,
68323 * so this tests vma->vm_flags & VM_SPECIAL, too.
68324@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68325 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68326 next = next->vm_next;
68327
68328+#ifdef CONFIG_PAX_SEGMEXEC
68329+ if (prev)
68330+ prev_m = pax_find_mirror_vma(prev);
68331+ if (area)
68332+ area_m = pax_find_mirror_vma(area);
68333+ if (next)
68334+ next_m = pax_find_mirror_vma(next);
68335+#endif
68336+
68337 /*
68338 * Can it merge with the predecessor?
68339 */
68340@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68341 /* cases 1, 6 */
68342 err = vma_adjust(prev, prev->vm_start,
68343 next->vm_end, prev->vm_pgoff, NULL);
68344- } else /* cases 2, 5, 7 */
68345+
68346+#ifdef CONFIG_PAX_SEGMEXEC
68347+ if (!err && prev_m)
68348+ err = vma_adjust(prev_m, prev_m->vm_start,
68349+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68350+#endif
68351+
68352+ } else { /* cases 2, 5, 7 */
68353 err = vma_adjust(prev, prev->vm_start,
68354 end, prev->vm_pgoff, NULL);
68355+
68356+#ifdef CONFIG_PAX_SEGMEXEC
68357+ if (!err && prev_m)
68358+ err = vma_adjust(prev_m, prev_m->vm_start,
68359+ end_m, prev_m->vm_pgoff, NULL);
68360+#endif
68361+
68362+ }
68363 if (err)
68364 return NULL;
68365 khugepaged_enter_vma_merge(prev);
68366@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68367 mpol_equal(policy, vma_policy(next)) &&
68368 can_vma_merge_before(next, vm_flags,
68369 anon_vma, file, pgoff+pglen)) {
68370- if (prev && addr < prev->vm_end) /* case 4 */
68371+ if (prev && addr < prev->vm_end) { /* case 4 */
68372 err = vma_adjust(prev, prev->vm_start,
68373 addr, prev->vm_pgoff, NULL);
68374- else /* cases 3, 8 */
68375+
68376+#ifdef CONFIG_PAX_SEGMEXEC
68377+ if (!err && prev_m)
68378+ err = vma_adjust(prev_m, prev_m->vm_start,
68379+ addr_m, prev_m->vm_pgoff, NULL);
68380+#endif
68381+
68382+ } else { /* cases 3, 8 */
68383 err = vma_adjust(area, addr, next->vm_end,
68384 next->vm_pgoff - pglen, NULL);
68385+
68386+#ifdef CONFIG_PAX_SEGMEXEC
68387+ if (!err && area_m)
68388+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68389+ next_m->vm_pgoff - pglen, NULL);
68390+#endif
68391+
68392+ }
68393 if (err)
68394 return NULL;
68395 khugepaged_enter_vma_merge(area);
68396@@ -921,14 +1001,11 @@ none:
68397 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68398 struct file *file, long pages)
68399 {
68400- const unsigned long stack_flags
68401- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68402-
68403 if (file) {
68404 mm->shared_vm += pages;
68405 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68406 mm->exec_vm += pages;
68407- } else if (flags & stack_flags)
68408+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68409 mm->stack_vm += pages;
68410 if (flags & (VM_RESERVED|VM_IO))
68411 mm->reserved_vm += pages;
68412@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68413 * (the exception is when the underlying filesystem is noexec
68414 * mounted, in which case we dont add PROT_EXEC.)
68415 */
68416- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68417+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68418 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68419 prot |= PROT_EXEC;
68420
68421@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68422 /* Obtain the address to map to. we verify (or select) it and ensure
68423 * that it represents a valid section of the address space.
68424 */
68425- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68426+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68427 if (addr & ~PAGE_MASK)
68428 return addr;
68429
68430@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68431 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68432 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68433
68434+#ifdef CONFIG_PAX_MPROTECT
68435+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68436+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68437+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68438+ gr_log_rwxmmap(file);
68439+
68440+#ifdef CONFIG_PAX_EMUPLT
68441+ vm_flags &= ~VM_EXEC;
68442+#else
68443+ return -EPERM;
68444+#endif
68445+
68446+ }
68447+
68448+ if (!(vm_flags & VM_EXEC))
68449+ vm_flags &= ~VM_MAYEXEC;
68450+#else
68451+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68452+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68453+#endif
68454+ else
68455+ vm_flags &= ~VM_MAYWRITE;
68456+ }
68457+#endif
68458+
68459+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68460+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68461+ vm_flags &= ~VM_PAGEEXEC;
68462+#endif
68463+
68464 if (flags & MAP_LOCKED)
68465 if (!can_do_mlock())
68466 return -EPERM;
68467@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68468 locked += mm->locked_vm;
68469 lock_limit = rlimit(RLIMIT_MEMLOCK);
68470 lock_limit >>= PAGE_SHIFT;
68471+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68472 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68473 return -EAGAIN;
68474 }
68475@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68476 if (error)
68477 return error;
68478
68479+ if (!gr_acl_handle_mmap(file, prot))
68480+ return -EACCES;
68481+
68482 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68483 }
68484 EXPORT_SYMBOL(do_mmap_pgoff);
68485@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68486 vm_flags_t vm_flags = vma->vm_flags;
68487
68488 /* If it was private or non-writable, the write bit is already clear */
68489- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68490+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68491 return 0;
68492
68493 /* The backer wishes to know when pages are first written to? */
68494@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68495 unsigned long charged = 0;
68496 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68497
68498+#ifdef CONFIG_PAX_SEGMEXEC
68499+ struct vm_area_struct *vma_m = NULL;
68500+#endif
68501+
68502+ /*
68503+ * mm->mmap_sem is required to protect against another thread
68504+ * changing the mappings in case we sleep.
68505+ */
68506+ verify_mm_writelocked(mm);
68507+
68508 /* Clear old maps */
68509 error = -ENOMEM;
68510-munmap_back:
68511 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68512 if (vma && vma->vm_start < addr + len) {
68513 if (do_munmap(mm, addr, len))
68514 return -ENOMEM;
68515- goto munmap_back;
68516+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68517+ BUG_ON(vma && vma->vm_start < addr + len);
68518 }
68519
68520 /* Check against address space limit. */
68521@@ -1258,6 +1379,16 @@ munmap_back:
68522 goto unacct_error;
68523 }
68524
68525+#ifdef CONFIG_PAX_SEGMEXEC
68526+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68527+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68528+ if (!vma_m) {
68529+ error = -ENOMEM;
68530+ goto free_vma;
68531+ }
68532+ }
68533+#endif
68534+
68535 vma->vm_mm = mm;
68536 vma->vm_start = addr;
68537 vma->vm_end = addr + len;
68538@@ -1281,6 +1412,19 @@ munmap_back:
68539 error = file->f_op->mmap(file, vma);
68540 if (error)
68541 goto unmap_and_free_vma;
68542+
68543+#ifdef CONFIG_PAX_SEGMEXEC
68544+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68545+ added_exe_file_vma(mm);
68546+#endif
68547+
68548+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68549+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68550+ vma->vm_flags |= VM_PAGEEXEC;
68551+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68552+ }
68553+#endif
68554+
68555 if (vm_flags & VM_EXECUTABLE)
68556 added_exe_file_vma(mm);
68557
68558@@ -1316,6 +1460,11 @@ munmap_back:
68559 vma_link(mm, vma, prev, rb_link, rb_parent);
68560 file = vma->vm_file;
68561
68562+#ifdef CONFIG_PAX_SEGMEXEC
68563+ if (vma_m)
68564+ BUG_ON(pax_mirror_vma(vma_m, vma));
68565+#endif
68566+
68567 /* Once vma denies write, undo our temporary denial count */
68568 if (correct_wcount)
68569 atomic_inc(&inode->i_writecount);
68570@@ -1324,6 +1473,7 @@ out:
68571
68572 mm->total_vm += len >> PAGE_SHIFT;
68573 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68574+ track_exec_limit(mm, addr, addr + len, vm_flags);
68575 if (vm_flags & VM_LOCKED) {
68576 if (!mlock_vma_pages_range(vma, addr, addr + len))
68577 mm->locked_vm += (len >> PAGE_SHIFT);
68578@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68579 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68580 charged = 0;
68581 free_vma:
68582+
68583+#ifdef CONFIG_PAX_SEGMEXEC
68584+ if (vma_m)
68585+ kmem_cache_free(vm_area_cachep, vma_m);
68586+#endif
68587+
68588 kmem_cache_free(vm_area_cachep, vma);
68589 unacct_error:
68590 if (charged)
68591@@ -1348,6 +1504,44 @@ unacct_error:
68592 return error;
68593 }
68594
68595+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68596+{
68597+ if (!vma) {
68598+#ifdef CONFIG_STACK_GROWSUP
68599+ if (addr > sysctl_heap_stack_gap)
68600+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68601+ else
68602+ vma = find_vma(current->mm, 0);
68603+ if (vma && (vma->vm_flags & VM_GROWSUP))
68604+ return false;
68605+#endif
68606+ return true;
68607+ }
68608+
68609+ if (addr + len > vma->vm_start)
68610+ return false;
68611+
68612+ if (vma->vm_flags & VM_GROWSDOWN)
68613+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68614+#ifdef CONFIG_STACK_GROWSUP
68615+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68616+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68617+#endif
68618+
68619+ return true;
68620+}
68621+
68622+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68623+{
68624+ if (vma->vm_start < len)
68625+ return -ENOMEM;
68626+ if (!(vma->vm_flags & VM_GROWSDOWN))
68627+ return vma->vm_start - len;
68628+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68629+ return vma->vm_start - len - sysctl_heap_stack_gap;
68630+ return -ENOMEM;
68631+}
68632+
68633 /* Get an address range which is currently unmapped.
68634 * For shmat() with addr=0.
68635 *
68636@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
68637 if (flags & MAP_FIXED)
68638 return addr;
68639
68640+#ifdef CONFIG_PAX_RANDMMAP
68641+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68642+#endif
68643+
68644 if (addr) {
68645 addr = PAGE_ALIGN(addr);
68646- vma = find_vma(mm, addr);
68647- if (TASK_SIZE - len >= addr &&
68648- (!vma || addr + len <= vma->vm_start))
68649- return addr;
68650+ if (TASK_SIZE - len >= addr) {
68651+ vma = find_vma(mm, addr);
68652+ if (check_heap_stack_gap(vma, addr, len))
68653+ return addr;
68654+ }
68655 }
68656 if (len > mm->cached_hole_size) {
68657- start_addr = addr = mm->free_area_cache;
68658+ start_addr = addr = mm->free_area_cache;
68659 } else {
68660- start_addr = addr = TASK_UNMAPPED_BASE;
68661- mm->cached_hole_size = 0;
68662+ start_addr = addr = mm->mmap_base;
68663+ mm->cached_hole_size = 0;
68664 }
68665
68666 full_search:
68667@@ -1396,34 +1595,40 @@ full_search:
68668 * Start a new search - just in case we missed
68669 * some holes.
68670 */
68671- if (start_addr != TASK_UNMAPPED_BASE) {
68672- addr = TASK_UNMAPPED_BASE;
68673- start_addr = addr;
68674+ if (start_addr != mm->mmap_base) {
68675+ start_addr = addr = mm->mmap_base;
68676 mm->cached_hole_size = 0;
68677 goto full_search;
68678 }
68679 return -ENOMEM;
68680 }
68681- if (!vma || addr + len <= vma->vm_start) {
68682- /*
68683- * Remember the place where we stopped the search:
68684- */
68685- mm->free_area_cache = addr + len;
68686- return addr;
68687- }
68688+ if (check_heap_stack_gap(vma, addr, len))
68689+ break;
68690 if (addr + mm->cached_hole_size < vma->vm_start)
68691 mm->cached_hole_size = vma->vm_start - addr;
68692 addr = vma->vm_end;
68693 }
68694+
68695+ /*
68696+ * Remember the place where we stopped the search:
68697+ */
68698+ mm->free_area_cache = addr + len;
68699+ return addr;
68700 }
68701 #endif
68702
68703 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68704 {
68705+
68706+#ifdef CONFIG_PAX_SEGMEXEC
68707+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68708+ return;
68709+#endif
68710+
68711 /*
68712 * Is this a new hole at the lowest possible address?
68713 */
68714- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68715+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68716 mm->free_area_cache = addr;
68717 mm->cached_hole_size = ~0UL;
68718 }
68719@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
68720 {
68721 struct vm_area_struct *vma;
68722 struct mm_struct *mm = current->mm;
68723- unsigned long addr = addr0;
68724+ unsigned long base = mm->mmap_base, addr = addr0;
68725
68726 /* requested length too big for entire address space */
68727 if (len > TASK_SIZE)
68728@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
68729 if (flags & MAP_FIXED)
68730 return addr;
68731
68732+#ifdef CONFIG_PAX_RANDMMAP
68733+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68734+#endif
68735+
68736 /* requesting a specific address */
68737 if (addr) {
68738 addr = PAGE_ALIGN(addr);
68739- vma = find_vma(mm, addr);
68740- if (TASK_SIZE - len >= addr &&
68741- (!vma || addr + len <= vma->vm_start))
68742- return addr;
68743+ if (TASK_SIZE - len >= addr) {
68744+ vma = find_vma(mm, addr);
68745+ if (check_heap_stack_gap(vma, addr, len))
68746+ return addr;
68747+ }
68748 }
68749
68750 /* check if free_area_cache is useful for us */
68751@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
68752 /* make sure it can fit in the remaining address space */
68753 if (addr > len) {
68754 vma = find_vma(mm, addr-len);
68755- if (!vma || addr <= vma->vm_start)
68756+ if (check_heap_stack_gap(vma, addr - len, len))
68757 /* remember the address as a hint for next time */
68758 return (mm->free_area_cache = addr-len);
68759 }
68760@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
68761 * return with success:
68762 */
68763 vma = find_vma(mm, addr);
68764- if (!vma || addr+len <= vma->vm_start)
68765+ if (check_heap_stack_gap(vma, addr, len))
68766 /* remember the address as a hint for next time */
68767 return (mm->free_area_cache = addr);
68768
68769@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
68770 mm->cached_hole_size = vma->vm_start - addr;
68771
68772 /* try just below the current vma->vm_start */
68773- addr = vma->vm_start-len;
68774- } while (len < vma->vm_start);
68775+ addr = skip_heap_stack_gap(vma, len);
68776+ } while (!IS_ERR_VALUE(addr));
68777
68778 bottomup:
68779 /*
68780@@ -1507,13 +1717,21 @@ bottomup:
68781 * can happen with large stack limits and large mmap()
68782 * allocations.
68783 */
68784+ mm->mmap_base = TASK_UNMAPPED_BASE;
68785+
68786+#ifdef CONFIG_PAX_RANDMMAP
68787+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68788+ mm->mmap_base += mm->delta_mmap;
68789+#endif
68790+
68791+ mm->free_area_cache = mm->mmap_base;
68792 mm->cached_hole_size = ~0UL;
68793- mm->free_area_cache = TASK_UNMAPPED_BASE;
68794 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68795 /*
68796 * Restore the topdown base:
68797 */
68798- mm->free_area_cache = mm->mmap_base;
68799+ mm->mmap_base = base;
68800+ mm->free_area_cache = base;
68801 mm->cached_hole_size = ~0UL;
68802
68803 return addr;
68804@@ -1522,6 +1740,12 @@ bottomup:
68805
68806 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68807 {
68808+
68809+#ifdef CONFIG_PAX_SEGMEXEC
68810+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68811+ return;
68812+#endif
68813+
68814 /*
68815 * Is this a new hole at the highest possible address?
68816 */
68817@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68818 mm->free_area_cache = addr;
68819
68820 /* dont allow allocations above current base */
68821- if (mm->free_area_cache > mm->mmap_base)
68822+ if (mm->free_area_cache > mm->mmap_base) {
68823 mm->free_area_cache = mm->mmap_base;
68824+ mm->cached_hole_size = ~0UL;
68825+ }
68826 }
68827
68828 unsigned long
68829@@ -1638,6 +1864,28 @@ out:
68830 return prev ? prev->vm_next : vma;
68831 }
68832
68833+#ifdef CONFIG_PAX_SEGMEXEC
68834+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68835+{
68836+ struct vm_area_struct *vma_m;
68837+
68838+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68839+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68840+ BUG_ON(vma->vm_mirror);
68841+ return NULL;
68842+ }
68843+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68844+ vma_m = vma->vm_mirror;
68845+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68846+ BUG_ON(vma->vm_file != vma_m->vm_file);
68847+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68848+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
68849+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
68850+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68851+ return vma_m;
68852+}
68853+#endif
68854+
68855 /*
68856 * Verify that the stack growth is acceptable and
68857 * update accounting. This is shared with both the
68858@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
68859 return -ENOMEM;
68860
68861 /* Stack limit test */
68862+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68863 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
68864 return -ENOMEM;
68865
68866@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
68867 locked = mm->locked_vm + grow;
68868 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
68869 limit >>= PAGE_SHIFT;
68870+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68871 if (locked > limit && !capable(CAP_IPC_LOCK))
68872 return -ENOMEM;
68873 }
68874@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
68875 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68876 * vma is the last one with address > vma->vm_end. Have to extend vma.
68877 */
68878+#ifndef CONFIG_IA64
68879+static
68880+#endif
68881 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68882 {
68883 int error;
68884+ bool locknext;
68885
68886 if (!(vma->vm_flags & VM_GROWSUP))
68887 return -EFAULT;
68888
68889+ /* Also guard against wrapping around to address 0. */
68890+ if (address < PAGE_ALIGN(address+1))
68891+ address = PAGE_ALIGN(address+1);
68892+ else
68893+ return -ENOMEM;
68894+
68895 /*
68896 * We must make sure the anon_vma is allocated
68897 * so that the anon_vma locking is not a noop.
68898 */
68899 if (unlikely(anon_vma_prepare(vma)))
68900 return -ENOMEM;
68901+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68902+ if (locknext && anon_vma_prepare(vma->vm_next))
68903+ return -ENOMEM;
68904 vma_lock_anon_vma(vma);
68905+ if (locknext)
68906+ vma_lock_anon_vma(vma->vm_next);
68907
68908 /*
68909 * vma->vm_start/vm_end cannot change under us because the caller
68910 * is required to hold the mmap_sem in read mode. We need the
68911- * anon_vma lock to serialize against concurrent expand_stacks.
68912- * Also guard against wrapping around to address 0.
68913+ * anon_vma locks to serialize against concurrent expand_stacks
68914+ * and expand_upwards.
68915 */
68916- if (address < PAGE_ALIGN(address+4))
68917- address = PAGE_ALIGN(address+4);
68918- else {
68919- vma_unlock_anon_vma(vma);
68920- return -ENOMEM;
68921- }
68922 error = 0;
68923
68924 /* Somebody else might have raced and expanded it already */
68925- if (address > vma->vm_end) {
68926+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68927+ error = -ENOMEM;
68928+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68929 unsigned long size, grow;
68930
68931 size = address - vma->vm_start;
68932@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68933 }
68934 }
68935 }
68936+ if (locknext)
68937+ vma_unlock_anon_vma(vma->vm_next);
68938 vma_unlock_anon_vma(vma);
68939 khugepaged_enter_vma_merge(vma);
68940 return error;
68941@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
68942 unsigned long address)
68943 {
68944 int error;
68945+ bool lockprev = false;
68946+ struct vm_area_struct *prev;
68947
68948 /*
68949 * We must make sure the anon_vma is allocated
68950@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
68951 if (error)
68952 return error;
68953
68954+ prev = vma->vm_prev;
68955+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68956+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68957+#endif
68958+ if (lockprev && anon_vma_prepare(prev))
68959+ return -ENOMEM;
68960+ if (lockprev)
68961+ vma_lock_anon_vma(prev);
68962+
68963 vma_lock_anon_vma(vma);
68964
68965 /*
68966@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
68967 */
68968
68969 /* Somebody else might have raced and expanded it already */
68970- if (address < vma->vm_start) {
68971+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68972+ error = -ENOMEM;
68973+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68974 unsigned long size, grow;
68975
68976+#ifdef CONFIG_PAX_SEGMEXEC
68977+ struct vm_area_struct *vma_m;
68978+
68979+ vma_m = pax_find_mirror_vma(vma);
68980+#endif
68981+
68982 size = vma->vm_end - address;
68983 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68984
68985@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
68986 if (!error) {
68987 vma->vm_start = address;
68988 vma->vm_pgoff -= grow;
68989+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68990+
68991+#ifdef CONFIG_PAX_SEGMEXEC
68992+ if (vma_m) {
68993+ vma_m->vm_start -= grow << PAGE_SHIFT;
68994+ vma_m->vm_pgoff -= grow;
68995+ }
68996+#endif
68997+
68998 perf_event_mmap(vma);
68999 }
69000 }
69001 }
69002 vma_unlock_anon_vma(vma);
69003+ if (lockprev)
69004+ vma_unlock_anon_vma(prev);
69005 khugepaged_enter_vma_merge(vma);
69006 return error;
69007 }
69008@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69009 do {
69010 long nrpages = vma_pages(vma);
69011
69012+#ifdef CONFIG_PAX_SEGMEXEC
69013+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69014+ vma = remove_vma(vma);
69015+ continue;
69016+ }
69017+#endif
69018+
69019 mm->total_vm -= nrpages;
69020 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69021 vma = remove_vma(vma);
69022@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69023 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69024 vma->vm_prev = NULL;
69025 do {
69026+
69027+#ifdef CONFIG_PAX_SEGMEXEC
69028+ if (vma->vm_mirror) {
69029+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69030+ vma->vm_mirror->vm_mirror = NULL;
69031+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69032+ vma->vm_mirror = NULL;
69033+ }
69034+#endif
69035+
69036 rb_erase(&vma->vm_rb, &mm->mm_rb);
69037 mm->map_count--;
69038 tail_vma = vma;
69039@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69040 struct vm_area_struct *new;
69041 int err = -ENOMEM;
69042
69043+#ifdef CONFIG_PAX_SEGMEXEC
69044+ struct vm_area_struct *vma_m, *new_m = NULL;
69045+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69046+#endif
69047+
69048 if (is_vm_hugetlb_page(vma) && (addr &
69049 ~(huge_page_mask(hstate_vma(vma)))))
69050 return -EINVAL;
69051
69052+#ifdef CONFIG_PAX_SEGMEXEC
69053+ vma_m = pax_find_mirror_vma(vma);
69054+#endif
69055+
69056 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69057 if (!new)
69058 goto out_err;
69059
69060+#ifdef CONFIG_PAX_SEGMEXEC
69061+ if (vma_m) {
69062+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69063+ if (!new_m) {
69064+ kmem_cache_free(vm_area_cachep, new);
69065+ goto out_err;
69066+ }
69067+ }
69068+#endif
69069+
69070 /* most fields are the same, copy all, and then fixup */
69071 *new = *vma;
69072
69073@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69074 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69075 }
69076
69077+#ifdef CONFIG_PAX_SEGMEXEC
69078+ if (vma_m) {
69079+ *new_m = *vma_m;
69080+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69081+ new_m->vm_mirror = new;
69082+ new->vm_mirror = new_m;
69083+
69084+ if (new_below)
69085+ new_m->vm_end = addr_m;
69086+ else {
69087+ new_m->vm_start = addr_m;
69088+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69089+ }
69090+ }
69091+#endif
69092+
69093 pol = mpol_dup(vma_policy(vma));
69094 if (IS_ERR(pol)) {
69095 err = PTR_ERR(pol);
69096@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69097 else
69098 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69099
69100+#ifdef CONFIG_PAX_SEGMEXEC
69101+ if (!err && vma_m) {
69102+ if (anon_vma_clone(new_m, vma_m))
69103+ goto out_free_mpol;
69104+
69105+ mpol_get(pol);
69106+ vma_set_policy(new_m, pol);
69107+
69108+ if (new_m->vm_file) {
69109+ get_file(new_m->vm_file);
69110+ if (vma_m->vm_flags & VM_EXECUTABLE)
69111+ added_exe_file_vma(mm);
69112+ }
69113+
69114+ if (new_m->vm_ops && new_m->vm_ops->open)
69115+ new_m->vm_ops->open(new_m);
69116+
69117+ if (new_below)
69118+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69119+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69120+ else
69121+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69122+
69123+ if (err) {
69124+ if (new_m->vm_ops && new_m->vm_ops->close)
69125+ new_m->vm_ops->close(new_m);
69126+ if (new_m->vm_file) {
69127+ if (vma_m->vm_flags & VM_EXECUTABLE)
69128+ removed_exe_file_vma(mm);
69129+ fput(new_m->vm_file);
69130+ }
69131+ mpol_put(pol);
69132+ }
69133+ }
69134+#endif
69135+
69136 /* Success. */
69137 if (!err)
69138 return 0;
69139@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69140 removed_exe_file_vma(mm);
69141 fput(new->vm_file);
69142 }
69143- unlink_anon_vmas(new);
69144 out_free_mpol:
69145 mpol_put(pol);
69146 out_free_vma:
69147+
69148+#ifdef CONFIG_PAX_SEGMEXEC
69149+ if (new_m) {
69150+ unlink_anon_vmas(new_m);
69151+ kmem_cache_free(vm_area_cachep, new_m);
69152+ }
69153+#endif
69154+
69155+ unlink_anon_vmas(new);
69156 kmem_cache_free(vm_area_cachep, new);
69157 out_err:
69158 return err;
69159@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69160 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69161 unsigned long addr, int new_below)
69162 {
69163+
69164+#ifdef CONFIG_PAX_SEGMEXEC
69165+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69166+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69167+ if (mm->map_count >= sysctl_max_map_count-1)
69168+ return -ENOMEM;
69169+ } else
69170+#endif
69171+
69172 if (mm->map_count >= sysctl_max_map_count)
69173 return -ENOMEM;
69174
69175@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69176 * work. This now handles partial unmappings.
69177 * Jeremy Fitzhardinge <jeremy@goop.org>
69178 */
69179+#ifdef CONFIG_PAX_SEGMEXEC
69180 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69181 {
69182+ int ret = __do_munmap(mm, start, len);
69183+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69184+ return ret;
69185+
69186+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69187+}
69188+
69189+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69190+#else
69191+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69192+#endif
69193+{
69194 unsigned long end;
69195 struct vm_area_struct *vma, *prev, *last;
69196
69197+ /*
69198+ * mm->mmap_sem is required to protect against another thread
69199+ * changing the mappings in case we sleep.
69200+ */
69201+ verify_mm_writelocked(mm);
69202+
69203 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69204 return -EINVAL;
69205
69206@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69207 /* Fix up all other VM information */
69208 remove_vma_list(mm, vma);
69209
69210+ track_exec_limit(mm, start, end, 0UL);
69211+
69212 return 0;
69213 }
69214
69215@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69216
69217 profile_munmap(addr);
69218
69219+#ifdef CONFIG_PAX_SEGMEXEC
69220+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69221+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69222+ return -EINVAL;
69223+#endif
69224+
69225 down_write(&mm->mmap_sem);
69226 ret = do_munmap(mm, addr, len);
69227 up_write(&mm->mmap_sem);
69228 return ret;
69229 }
69230
69231-static inline void verify_mm_writelocked(struct mm_struct *mm)
69232-{
69233-#ifdef CONFIG_DEBUG_VM
69234- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69235- WARN_ON(1);
69236- up_read(&mm->mmap_sem);
69237- }
69238-#endif
69239-}
69240-
69241 /*
69242 * this is really a simplified "do_mmap". it only handles
69243 * anonymous maps. eventually we may be able to do some
69244@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69245 struct rb_node ** rb_link, * rb_parent;
69246 pgoff_t pgoff = addr >> PAGE_SHIFT;
69247 int error;
69248+ unsigned long charged;
69249
69250 len = PAGE_ALIGN(len);
69251 if (!len)
69252@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69253
69254 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69255
69256+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69257+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69258+ flags &= ~VM_EXEC;
69259+
69260+#ifdef CONFIG_PAX_MPROTECT
69261+ if (mm->pax_flags & MF_PAX_MPROTECT)
69262+ flags &= ~VM_MAYEXEC;
69263+#endif
69264+
69265+ }
69266+#endif
69267+
69268 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69269 if (error & ~PAGE_MASK)
69270 return error;
69271
69272+ charged = len >> PAGE_SHIFT;
69273+
69274 /*
69275 * mlock MCL_FUTURE?
69276 */
69277 if (mm->def_flags & VM_LOCKED) {
69278 unsigned long locked, lock_limit;
69279- locked = len >> PAGE_SHIFT;
69280+ locked = charged;
69281 locked += mm->locked_vm;
69282 lock_limit = rlimit(RLIMIT_MEMLOCK);
69283 lock_limit >>= PAGE_SHIFT;
69284@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69285 /*
69286 * Clear old maps. this also does some error checking for us
69287 */
69288- munmap_back:
69289 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69290 if (vma && vma->vm_start < addr + len) {
69291 if (do_munmap(mm, addr, len))
69292 return -ENOMEM;
69293- goto munmap_back;
69294+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69295+ BUG_ON(vma && vma->vm_start < addr + len);
69296 }
69297
69298 /* Check against address space limits *after* clearing old maps... */
69299- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69300+ if (!may_expand_vm(mm, charged))
69301 return -ENOMEM;
69302
69303 if (mm->map_count > sysctl_max_map_count)
69304 return -ENOMEM;
69305
69306- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69307+ if (security_vm_enough_memory(charged))
69308 return -ENOMEM;
69309
69310 /* Can we just expand an old private anonymous mapping? */
69311@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69312 */
69313 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69314 if (!vma) {
69315- vm_unacct_memory(len >> PAGE_SHIFT);
69316+ vm_unacct_memory(charged);
69317 return -ENOMEM;
69318 }
69319
69320@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69321 vma_link(mm, vma, prev, rb_link, rb_parent);
69322 out:
69323 perf_event_mmap(vma);
69324- mm->total_vm += len >> PAGE_SHIFT;
69325+ mm->total_vm += charged;
69326 if (flags & VM_LOCKED) {
69327 if (!mlock_vma_pages_range(vma, addr, addr + len))
69328- mm->locked_vm += (len >> PAGE_SHIFT);
69329+ mm->locked_vm += charged;
69330 }
69331+ track_exec_limit(mm, addr, addr + len, flags);
69332 return addr;
69333 }
69334
69335@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69336 * Walk the list again, actually closing and freeing it,
69337 * with preemption enabled, without holding any MM locks.
69338 */
69339- while (vma)
69340+ while (vma) {
69341+ vma->vm_mirror = NULL;
69342 vma = remove_vma(vma);
69343+ }
69344
69345 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69346 }
69347@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69348 struct vm_area_struct * __vma, * prev;
69349 struct rb_node ** rb_link, * rb_parent;
69350
69351+#ifdef CONFIG_PAX_SEGMEXEC
69352+ struct vm_area_struct *vma_m = NULL;
69353+#endif
69354+
69355+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69356+ return -EPERM;
69357+
69358 /*
69359 * The vm_pgoff of a purely anonymous vma should be irrelevant
69360 * until its first write fault, when page's anon_vma and index
69361@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69362 if ((vma->vm_flags & VM_ACCOUNT) &&
69363 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69364 return -ENOMEM;
69365+
69366+#ifdef CONFIG_PAX_SEGMEXEC
69367+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69368+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69369+ if (!vma_m)
69370+ return -ENOMEM;
69371+ }
69372+#endif
69373+
69374 vma_link(mm, vma, prev, rb_link, rb_parent);
69375+
69376+#ifdef CONFIG_PAX_SEGMEXEC
69377+ if (vma_m)
69378+ BUG_ON(pax_mirror_vma(vma_m, vma));
69379+#endif
69380+
69381 return 0;
69382 }
69383
69384@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69385 struct rb_node **rb_link, *rb_parent;
69386 struct mempolicy *pol;
69387
69388+ BUG_ON(vma->vm_mirror);
69389+
69390 /*
69391 * If anonymous vma has not yet been faulted, update new pgoff
69392 * to match new location, to increase its chance of merging.
69393@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69394 return NULL;
69395 }
69396
69397+#ifdef CONFIG_PAX_SEGMEXEC
69398+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69399+{
69400+ struct vm_area_struct *prev_m;
69401+ struct rb_node **rb_link_m, *rb_parent_m;
69402+ struct mempolicy *pol_m;
69403+
69404+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69405+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69406+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69407+ *vma_m = *vma;
69408+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69409+ if (anon_vma_clone(vma_m, vma))
69410+ return -ENOMEM;
69411+ pol_m = vma_policy(vma_m);
69412+ mpol_get(pol_m);
69413+ vma_set_policy(vma_m, pol_m);
69414+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69415+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69416+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69417+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69418+ if (vma_m->vm_file)
69419+ get_file(vma_m->vm_file);
69420+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69421+ vma_m->vm_ops->open(vma_m);
69422+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69423+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69424+ vma_m->vm_mirror = vma;
69425+ vma->vm_mirror = vma_m;
69426+ return 0;
69427+}
69428+#endif
69429+
69430 /*
69431 * Return true if the calling process may expand its vm space by the passed
69432 * number of pages
69433@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69434 unsigned long lim;
69435
69436 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69437-
69438+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69439 if (cur + npages > lim)
69440 return 0;
69441 return 1;
69442@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
69443 vma->vm_start = addr;
69444 vma->vm_end = addr + len;
69445
69446+#ifdef CONFIG_PAX_MPROTECT
69447+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69448+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69449+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69450+ return -EPERM;
69451+ if (!(vm_flags & VM_EXEC))
69452+ vm_flags &= ~VM_MAYEXEC;
69453+#else
69454+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69455+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69456+#endif
69457+ else
69458+ vm_flags &= ~VM_MAYWRITE;
69459+ }
69460+#endif
69461+
69462 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69463 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69464
69465diff --git a/mm/mprotect.c b/mm/mprotect.c
69466index 5a688a2..27e031c 100644
69467--- a/mm/mprotect.c
69468+++ b/mm/mprotect.c
69469@@ -23,10 +23,16 @@
69470 #include <linux/mmu_notifier.h>
69471 #include <linux/migrate.h>
69472 #include <linux/perf_event.h>
69473+
69474+#ifdef CONFIG_PAX_MPROTECT
69475+#include <linux/elf.h>
69476+#endif
69477+
69478 #include <asm/uaccess.h>
69479 #include <asm/pgtable.h>
69480 #include <asm/cacheflush.h>
69481 #include <asm/tlbflush.h>
69482+#include <asm/mmu_context.h>
69483
69484 #ifndef pgprot_modify
69485 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69486@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
69487 flush_tlb_range(vma, start, end);
69488 }
69489
69490+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69491+/* called while holding the mmap semaphor for writing except stack expansion */
69492+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69493+{
69494+ unsigned long oldlimit, newlimit = 0UL;
69495+
69496+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69497+ return;
69498+
69499+ spin_lock(&mm->page_table_lock);
69500+ oldlimit = mm->context.user_cs_limit;
69501+ if ((prot & VM_EXEC) && oldlimit < end)
69502+ /* USER_CS limit moved up */
69503+ newlimit = end;
69504+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69505+ /* USER_CS limit moved down */
69506+ newlimit = start;
69507+
69508+ if (newlimit) {
69509+ mm->context.user_cs_limit = newlimit;
69510+
69511+#ifdef CONFIG_SMP
69512+ wmb();
69513+ cpus_clear(mm->context.cpu_user_cs_mask);
69514+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69515+#endif
69516+
69517+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69518+ }
69519+ spin_unlock(&mm->page_table_lock);
69520+ if (newlimit == end) {
69521+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69522+
69523+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69524+ if (is_vm_hugetlb_page(vma))
69525+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69526+ else
69527+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69528+ }
69529+}
69530+#endif
69531+
69532 int
69533 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69534 unsigned long start, unsigned long end, unsigned long newflags)
69535@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69536 int error;
69537 int dirty_accountable = 0;
69538
69539+#ifdef CONFIG_PAX_SEGMEXEC
69540+ struct vm_area_struct *vma_m = NULL;
69541+ unsigned long start_m, end_m;
69542+
69543+ start_m = start + SEGMEXEC_TASK_SIZE;
69544+ end_m = end + SEGMEXEC_TASK_SIZE;
69545+#endif
69546+
69547 if (newflags == oldflags) {
69548 *pprev = vma;
69549 return 0;
69550 }
69551
69552+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69553+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69554+
69555+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69556+ return -ENOMEM;
69557+
69558+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69559+ return -ENOMEM;
69560+ }
69561+
69562 /*
69563 * If we make a private mapping writable we increase our commit;
69564 * but (without finer accounting) cannot reduce our commit if we
69565@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69566 }
69567 }
69568
69569+#ifdef CONFIG_PAX_SEGMEXEC
69570+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69571+ if (start != vma->vm_start) {
69572+ error = split_vma(mm, vma, start, 1);
69573+ if (error)
69574+ goto fail;
69575+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69576+ *pprev = (*pprev)->vm_next;
69577+ }
69578+
69579+ if (end != vma->vm_end) {
69580+ error = split_vma(mm, vma, end, 0);
69581+ if (error)
69582+ goto fail;
69583+ }
69584+
69585+ if (pax_find_mirror_vma(vma)) {
69586+ error = __do_munmap(mm, start_m, end_m - start_m);
69587+ if (error)
69588+ goto fail;
69589+ } else {
69590+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69591+ if (!vma_m) {
69592+ error = -ENOMEM;
69593+ goto fail;
69594+ }
69595+ vma->vm_flags = newflags;
69596+ error = pax_mirror_vma(vma_m, vma);
69597+ if (error) {
69598+ vma->vm_flags = oldflags;
69599+ goto fail;
69600+ }
69601+ }
69602+ }
69603+#endif
69604+
69605 /*
69606 * First try to merge with previous and/or next vma.
69607 */
69608@@ -204,9 +306,21 @@ success:
69609 * vm_flags and vm_page_prot are protected by the mmap_sem
69610 * held in write mode.
69611 */
69612+
69613+#ifdef CONFIG_PAX_SEGMEXEC
69614+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69615+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69616+#endif
69617+
69618 vma->vm_flags = newflags;
69619+
69620+#ifdef CONFIG_PAX_MPROTECT
69621+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69622+ mm->binfmt->handle_mprotect(vma, newflags);
69623+#endif
69624+
69625 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69626- vm_get_page_prot(newflags));
69627+ vm_get_page_prot(vma->vm_flags));
69628
69629 if (vma_wants_writenotify(vma)) {
69630 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69631@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
69632 end = start + len;
69633 if (end <= start)
69634 return -ENOMEM;
69635+
69636+#ifdef CONFIG_PAX_SEGMEXEC
69637+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69638+ if (end > SEGMEXEC_TASK_SIZE)
69639+ return -EINVAL;
69640+ } else
69641+#endif
69642+
69643+ if (end > TASK_SIZE)
69644+ return -EINVAL;
69645+
69646 if (!arch_validate_prot(prot))
69647 return -EINVAL;
69648
69649@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
69650 /*
69651 * Does the application expect PROT_READ to imply PROT_EXEC:
69652 */
69653- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69654+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69655 prot |= PROT_EXEC;
69656
69657 vm_flags = calc_vm_prot_bits(prot);
69658@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
69659 if (start > vma->vm_start)
69660 prev = vma;
69661
69662+#ifdef CONFIG_PAX_MPROTECT
69663+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69664+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69665+#endif
69666+
69667 for (nstart = start ; ; ) {
69668 unsigned long newflags;
69669
69670@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
69671
69672 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69673 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69674+ if (prot & (PROT_WRITE | PROT_EXEC))
69675+ gr_log_rwxmprotect(vma->vm_file);
69676+
69677+ error = -EACCES;
69678+ goto out;
69679+ }
69680+
69681+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69682 error = -EACCES;
69683 goto out;
69684 }
69685@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
69686 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69687 if (error)
69688 goto out;
69689+
69690+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69691+
69692 nstart = tmp;
69693
69694 if (nstart < prev->vm_end)
69695diff --git a/mm/mremap.c b/mm/mremap.c
69696index d6959cb..18a402a 100644
69697--- a/mm/mremap.c
69698+++ b/mm/mremap.c
69699@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
69700 continue;
69701 pte = ptep_get_and_clear(mm, old_addr, old_pte);
69702 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69703+
69704+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69705+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69706+ pte = pte_exprotect(pte);
69707+#endif
69708+
69709 set_pte_at(mm, new_addr, new_pte, pte);
69710 }
69711
69712@@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
69713 if (is_vm_hugetlb_page(vma))
69714 goto Einval;
69715
69716+#ifdef CONFIG_PAX_SEGMEXEC
69717+ if (pax_find_mirror_vma(vma))
69718+ goto Einval;
69719+#endif
69720+
69721 /* We can't remap across vm area boundaries */
69722 if (old_len > vma->vm_end - addr)
69723 goto Efault;
69724@@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
69725 unsigned long ret = -EINVAL;
69726 unsigned long charged = 0;
69727 unsigned long map_flags;
69728+ unsigned long pax_task_size = TASK_SIZE;
69729
69730 if (new_addr & ~PAGE_MASK)
69731 goto out;
69732
69733- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69734+#ifdef CONFIG_PAX_SEGMEXEC
69735+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69736+ pax_task_size = SEGMEXEC_TASK_SIZE;
69737+#endif
69738+
69739+ pax_task_size -= PAGE_SIZE;
69740+
69741+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69742 goto out;
69743
69744 /* Check if the location we're moving into overlaps the
69745 * old location at all, and fail if it does.
69746 */
69747- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69748- goto out;
69749-
69750- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69751+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69752 goto out;
69753
69754 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69755@@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
69756 struct vm_area_struct *vma;
69757 unsigned long ret = -EINVAL;
69758 unsigned long charged = 0;
69759+ unsigned long pax_task_size = TASK_SIZE;
69760
69761 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69762 goto out;
69763@@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
69764 if (!new_len)
69765 goto out;
69766
69767+#ifdef CONFIG_PAX_SEGMEXEC
69768+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69769+ pax_task_size = SEGMEXEC_TASK_SIZE;
69770+#endif
69771+
69772+ pax_task_size -= PAGE_SIZE;
69773+
69774+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69775+ old_len > pax_task_size || addr > pax_task_size-old_len)
69776+ goto out;
69777+
69778 if (flags & MREMAP_FIXED) {
69779 if (flags & MREMAP_MAYMOVE)
69780 ret = mremap_to(addr, old_len, new_addr, new_len);
69781@@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
69782 addr + new_len);
69783 }
69784 ret = addr;
69785+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69786 goto out;
69787 }
69788 }
69789@@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
69790 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69791 if (ret)
69792 goto out;
69793+
69794+ map_flags = vma->vm_flags;
69795 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69796+ if (!(ret & ~PAGE_MASK)) {
69797+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69798+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69799+ }
69800 }
69801 out:
69802 if (ret & ~PAGE_MASK)
69803diff --git a/mm/nobootmem.c b/mm/nobootmem.c
69804index 7fa41b4..6087460 100644
69805--- a/mm/nobootmem.c
69806+++ b/mm/nobootmem.c
69807@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
69808 unsigned long __init free_all_memory_core_early(int nodeid)
69809 {
69810 int i;
69811- u64 start, end;
69812+ u64 start, end, startrange, endrange;
69813 unsigned long count = 0;
69814- struct range *range = NULL;
69815+ struct range *range = NULL, rangerange = { 0, 0 };
69816 int nr_range;
69817
69818 nr_range = get_free_all_memory_range(&range, nodeid);
69819+ startrange = __pa(range) >> PAGE_SHIFT;
69820+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69821
69822 for (i = 0; i < nr_range; i++) {
69823 start = range[i].start;
69824 end = range[i].end;
69825+ if (start <= endrange && startrange < end) {
69826+ BUG_ON(rangerange.start | rangerange.end);
69827+ rangerange = range[i];
69828+ continue;
69829+ }
69830 count += end - start;
69831 __free_pages_memory(start, end);
69832 }
69833+ start = rangerange.start;
69834+ end = rangerange.end;
69835+ count += end - start;
69836+ __free_pages_memory(start, end);
69837
69838 return count;
69839 }
69840diff --git a/mm/nommu.c b/mm/nommu.c
69841index b982290..7d73f53 100644
69842--- a/mm/nommu.c
69843+++ b/mm/nommu.c
69844@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
69845 int sysctl_overcommit_ratio = 50; /* default is 50% */
69846 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69847 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69848-int heap_stack_gap = 0;
69849
69850 atomic_long_t mmap_pages_allocated;
69851
69852@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69853 EXPORT_SYMBOL(find_vma);
69854
69855 /*
69856- * find a VMA
69857- * - we don't extend stack VMAs under NOMMU conditions
69858- */
69859-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69860-{
69861- return find_vma(mm, addr);
69862-}
69863-
69864-/*
69865 * expand a stack to a given address
69866 * - not supported under NOMMU conditions
69867 */
69868@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69869
69870 /* most fields are the same, copy all, and then fixup */
69871 *new = *vma;
69872+ INIT_LIST_HEAD(&new->anon_vma_chain);
69873 *region = *vma->vm_region;
69874 new->vm_region = region;
69875
69876diff --git a/mm/page_alloc.c b/mm/page_alloc.c
69877index 485be89..c059ad3 100644
69878--- a/mm/page_alloc.c
69879+++ b/mm/page_alloc.c
69880@@ -341,7 +341,7 @@ out:
69881 * This usage means that zero-order pages may not be compound.
69882 */
69883
69884-static void free_compound_page(struct page *page)
69885+void free_compound_page(struct page *page)
69886 {
69887 __free_pages_ok(page, compound_order(page));
69888 }
69889@@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
69890 int i;
69891 int bad = 0;
69892
69893+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69894+ unsigned long index = 1UL << order;
69895+#endif
69896+
69897 trace_mm_page_free_direct(page, order);
69898 kmemcheck_free_shadow(page, order);
69899
69900@@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
69901 debug_check_no_obj_freed(page_address(page),
69902 PAGE_SIZE << order);
69903 }
69904+
69905+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69906+ for (; index; --index)
69907+ sanitize_highpage(page + index - 1);
69908+#endif
69909+
69910 arch_free_page(page, order);
69911 kernel_map_pages(page, 1 << order, 0);
69912
69913@@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
69914 arch_alloc_page(page, order);
69915 kernel_map_pages(page, 1 << order, 1);
69916
69917+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69918 if (gfp_flags & __GFP_ZERO)
69919 prep_zero_page(page, order, gfp_flags);
69920+#endif
69921
69922 if (order && (gfp_flags & __GFP_COMP))
69923 prep_compound_page(page, order);
69924@@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
69925 unsigned long pfn;
69926
69927 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
69928+#ifdef CONFIG_X86_32
69929+ /* boot failures in VMware 8 on 32bit vanilla since
69930+ this change */
69931+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
69932+#else
69933 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
69934+#endif
69935 return 1;
69936 }
69937 return 0;
69938diff --git a/mm/percpu.c b/mm/percpu.c
69939index 716eb4a..8d10419 100644
69940--- a/mm/percpu.c
69941+++ b/mm/percpu.c
69942@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
69943 static unsigned int pcpu_high_unit_cpu __read_mostly;
69944
69945 /* the address of the first chunk which starts with the kernel static area */
69946-void *pcpu_base_addr __read_mostly;
69947+void *pcpu_base_addr __read_only;
69948 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69949
69950 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69951diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
69952index e920aa3..c19184f0 100644
69953--- a/mm/process_vm_access.c
69954+++ b/mm/process_vm_access.c
69955@@ -13,6 +13,7 @@
69956 #include <linux/uio.h>
69957 #include <linux/sched.h>
69958 #include <linux/highmem.h>
69959+#include <linux/security.h>
69960 #include <linux/ptrace.h>
69961 #include <linux/slab.h>
69962 #include <linux/syscalls.h>
69963@@ -264,13 +265,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
69964 */
69965 for (i = 0; i < riovcnt; i++) {
69966 iov_len = rvec[i].iov_len;
69967- if (iov_len > 0) {
69968- nr_pages_iov = ((unsigned long)rvec[i].iov_base
69969- + iov_len)
69970- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
69971- / PAGE_SIZE + 1;
69972- nr_pages = max(nr_pages, nr_pages_iov);
69973- }
69974+ if (iov_len <= 0)
69975+ continue;
69976+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
69977+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
69978+ nr_pages = max(nr_pages, nr_pages_iov);
69979 }
69980
69981 if (nr_pages == 0)
69982@@ -298,8 +297,13 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
69983 goto free_proc_pages;
69984 }
69985
69986+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
69987+ rc = -EPERM;
69988+ goto put_task_struct;
69989+ }
69990+
69991 task_lock(task);
69992- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
69993+ if (ptrace_may_access_nolock(task, PTRACE_MODE_ATTACH)) {
69994 task_unlock(task);
69995 rc = -EPERM;
69996 goto put_task_struct;
69997diff --git a/mm/rmap.c b/mm/rmap.c
69998index a4fd368..e0ffec7 100644
69999--- a/mm/rmap.c
70000+++ b/mm/rmap.c
70001@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70002 struct anon_vma *anon_vma = vma->anon_vma;
70003 struct anon_vma_chain *avc;
70004
70005+#ifdef CONFIG_PAX_SEGMEXEC
70006+ struct anon_vma_chain *avc_m = NULL;
70007+#endif
70008+
70009 might_sleep();
70010 if (unlikely(!anon_vma)) {
70011 struct mm_struct *mm = vma->vm_mm;
70012@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70013 if (!avc)
70014 goto out_enomem;
70015
70016+#ifdef CONFIG_PAX_SEGMEXEC
70017+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70018+ if (!avc_m)
70019+ goto out_enomem_free_avc;
70020+#endif
70021+
70022 anon_vma = find_mergeable_anon_vma(vma);
70023 allocated = NULL;
70024 if (!anon_vma) {
70025@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70026 /* page_table_lock to protect against threads */
70027 spin_lock(&mm->page_table_lock);
70028 if (likely(!vma->anon_vma)) {
70029+
70030+#ifdef CONFIG_PAX_SEGMEXEC
70031+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70032+
70033+ if (vma_m) {
70034+ BUG_ON(vma_m->anon_vma);
70035+ vma_m->anon_vma = anon_vma;
70036+ avc_m->anon_vma = anon_vma;
70037+ avc_m->vma = vma;
70038+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70039+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70040+ avc_m = NULL;
70041+ }
70042+#endif
70043+
70044 vma->anon_vma = anon_vma;
70045 avc->anon_vma = anon_vma;
70046 avc->vma = vma;
70047@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70048
70049 if (unlikely(allocated))
70050 put_anon_vma(allocated);
70051+
70052+#ifdef CONFIG_PAX_SEGMEXEC
70053+ if (unlikely(avc_m))
70054+ anon_vma_chain_free(avc_m);
70055+#endif
70056+
70057 if (unlikely(avc))
70058 anon_vma_chain_free(avc);
70059 }
70060 return 0;
70061
70062 out_enomem_free_avc:
70063+
70064+#ifdef CONFIG_PAX_SEGMEXEC
70065+ if (avc_m)
70066+ anon_vma_chain_free(avc_m);
70067+#endif
70068+
70069 anon_vma_chain_free(avc);
70070 out_enomem:
70071 return -ENOMEM;
70072@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70073 * Attach the anon_vmas from src to dst.
70074 * Returns 0 on success, -ENOMEM on failure.
70075 */
70076-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70077+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70078 {
70079 struct anon_vma_chain *avc, *pavc;
70080 struct anon_vma *root = NULL;
70081@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70082 * the corresponding VMA in the parent process is attached to.
70083 * Returns 0 on success, non-zero on failure.
70084 */
70085-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70086+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70087 {
70088 struct anon_vma_chain *avc;
70089 struct anon_vma *anon_vma;
70090diff --git a/mm/shmem.c b/mm/shmem.c
70091index 6c253f7..367e20a 100644
70092--- a/mm/shmem.c
70093+++ b/mm/shmem.c
70094@@ -31,7 +31,7 @@
70095 #include <linux/export.h>
70096 #include <linux/swap.h>
70097
70098-static struct vfsmount *shm_mnt;
70099+struct vfsmount *shm_mnt;
70100
70101 #ifdef CONFIG_SHMEM
70102 /*
70103@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70104 #define BOGO_DIRENT_SIZE 20
70105
70106 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70107-#define SHORT_SYMLINK_LEN 128
70108+#define SHORT_SYMLINK_LEN 64
70109
70110 struct shmem_xattr {
70111 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70112@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70113 int err = -ENOMEM;
70114
70115 /* Round up to L1_CACHE_BYTES to resist false sharing */
70116- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70117- L1_CACHE_BYTES), GFP_KERNEL);
70118+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70119 if (!sbinfo)
70120 return -ENOMEM;
70121
70122diff --git a/mm/slab.c b/mm/slab.c
70123index 83311c9a..fcf8f86 100644
70124--- a/mm/slab.c
70125+++ b/mm/slab.c
70126@@ -151,7 +151,7 @@
70127
70128 /* Legal flag mask for kmem_cache_create(). */
70129 #if DEBUG
70130-# define CREATE_MASK (SLAB_RED_ZONE | \
70131+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70132 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70133 SLAB_CACHE_DMA | \
70134 SLAB_STORE_USER | \
70135@@ -159,7 +159,7 @@
70136 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70137 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70138 #else
70139-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70140+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70141 SLAB_CACHE_DMA | \
70142 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70143 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70144@@ -288,7 +288,7 @@ struct kmem_list3 {
70145 * Need this for bootstrapping a per node allocator.
70146 */
70147 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70148-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70149+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70150 #define CACHE_CACHE 0
70151 #define SIZE_AC MAX_NUMNODES
70152 #define SIZE_L3 (2 * MAX_NUMNODES)
70153@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70154 if ((x)->max_freeable < i) \
70155 (x)->max_freeable = i; \
70156 } while (0)
70157-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70158-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70159-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70160-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70161+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70162+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70163+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70164+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70165 #else
70166 #define STATS_INC_ACTIVE(x) do { } while (0)
70167 #define STATS_DEC_ACTIVE(x) do { } while (0)
70168@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70169 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70170 */
70171 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70172- const struct slab *slab, void *obj)
70173+ const struct slab *slab, const void *obj)
70174 {
70175 u32 offset = (obj - slab->s_mem);
70176 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70177@@ -564,7 +564,7 @@ struct cache_names {
70178 static struct cache_names __initdata cache_names[] = {
70179 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70180 #include <linux/kmalloc_sizes.h>
70181- {NULL,}
70182+ {NULL}
70183 #undef CACHE
70184 };
70185
70186@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70187 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70188 sizes[INDEX_AC].cs_size,
70189 ARCH_KMALLOC_MINALIGN,
70190- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70191+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70192 NULL);
70193
70194 if (INDEX_AC != INDEX_L3) {
70195@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70196 kmem_cache_create(names[INDEX_L3].name,
70197 sizes[INDEX_L3].cs_size,
70198 ARCH_KMALLOC_MINALIGN,
70199- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70200+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70201 NULL);
70202 }
70203
70204@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70205 sizes->cs_cachep = kmem_cache_create(names->name,
70206 sizes->cs_size,
70207 ARCH_KMALLOC_MINALIGN,
70208- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70209+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70210 NULL);
70211 }
70212 #ifdef CONFIG_ZONE_DMA
70213@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70214 }
70215 /* cpu stats */
70216 {
70217- unsigned long allochit = atomic_read(&cachep->allochit);
70218- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70219- unsigned long freehit = atomic_read(&cachep->freehit);
70220- unsigned long freemiss = atomic_read(&cachep->freemiss);
70221+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70222+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70223+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70224+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70225
70226 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70227 allochit, allocmiss, freehit, freemiss);
70228@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70229 {
70230 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70231 #ifdef CONFIG_DEBUG_SLAB_LEAK
70232- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70233+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70234 #endif
70235 return 0;
70236 }
70237 module_init(slab_proc_init);
70238 #endif
70239
70240+void check_object_size(const void *ptr, unsigned long n, bool to)
70241+{
70242+
70243+#ifdef CONFIG_PAX_USERCOPY
70244+ struct page *page;
70245+ struct kmem_cache *cachep = NULL;
70246+ struct slab *slabp;
70247+ unsigned int objnr;
70248+ unsigned long offset;
70249+ const char *type;
70250+
70251+ if (!n)
70252+ return;
70253+
70254+ type = "<null>";
70255+ if (ZERO_OR_NULL_PTR(ptr))
70256+ goto report;
70257+
70258+ if (!virt_addr_valid(ptr))
70259+ return;
70260+
70261+ page = virt_to_head_page(ptr);
70262+
70263+ type = "<process stack>";
70264+ if (!PageSlab(page)) {
70265+ if (object_is_on_stack(ptr, n) == -1)
70266+ goto report;
70267+ return;
70268+ }
70269+
70270+ cachep = page_get_cache(page);
70271+ type = cachep->name;
70272+ if (!(cachep->flags & SLAB_USERCOPY))
70273+ goto report;
70274+
70275+ slabp = page_get_slab(page);
70276+ objnr = obj_to_index(cachep, slabp, ptr);
70277+ BUG_ON(objnr >= cachep->num);
70278+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70279+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70280+ return;
70281+
70282+report:
70283+ pax_report_usercopy(ptr, n, to, type);
70284+#endif
70285+
70286+}
70287+EXPORT_SYMBOL(check_object_size);
70288+
70289 /**
70290 * ksize - get the actual amount of memory allocated for a given object
70291 * @objp: Pointer to the object
70292diff --git a/mm/slob.c b/mm/slob.c
70293index 8105be4..579da9d 100644
70294--- a/mm/slob.c
70295+++ b/mm/slob.c
70296@@ -29,7 +29,7 @@
70297 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70298 * alloc_pages() directly, allocating compound pages so the page order
70299 * does not have to be separately tracked, and also stores the exact
70300- * allocation size in page->private so that it can be used to accurately
70301+ * allocation size in slob_page->size so that it can be used to accurately
70302 * provide ksize(). These objects are detected in kfree() because slob_page()
70303 * is false for them.
70304 *
70305@@ -58,6 +58,7 @@
70306 */
70307
70308 #include <linux/kernel.h>
70309+#include <linux/sched.h>
70310 #include <linux/slab.h>
70311 #include <linux/mm.h>
70312 #include <linux/swap.h> /* struct reclaim_state */
70313@@ -102,7 +103,8 @@ struct slob_page {
70314 unsigned long flags; /* mandatory */
70315 atomic_t _count; /* mandatory */
70316 slobidx_t units; /* free units left in page */
70317- unsigned long pad[2];
70318+ unsigned long pad[1];
70319+ unsigned long size; /* size when >=PAGE_SIZE */
70320 slob_t *free; /* first free slob_t in page */
70321 struct list_head list; /* linked list of free pages */
70322 };
70323@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70324 */
70325 static inline int is_slob_page(struct slob_page *sp)
70326 {
70327- return PageSlab((struct page *)sp);
70328+ return PageSlab((struct page *)sp) && !sp->size;
70329 }
70330
70331 static inline void set_slob_page(struct slob_page *sp)
70332@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70333
70334 static inline struct slob_page *slob_page(const void *addr)
70335 {
70336- return (struct slob_page *)virt_to_page(addr);
70337+ return (struct slob_page *)virt_to_head_page(addr);
70338 }
70339
70340 /*
70341@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70342 /*
70343 * Return the size of a slob block.
70344 */
70345-static slobidx_t slob_units(slob_t *s)
70346+static slobidx_t slob_units(const slob_t *s)
70347 {
70348 if (s->units > 0)
70349 return s->units;
70350@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70351 /*
70352 * Return the next free slob block pointer after this one.
70353 */
70354-static slob_t *slob_next(slob_t *s)
70355+static slob_t *slob_next(const slob_t *s)
70356 {
70357 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70358 slobidx_t next;
70359@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70360 /*
70361 * Returns true if s is the last free block in its page.
70362 */
70363-static int slob_last(slob_t *s)
70364+static int slob_last(const slob_t *s)
70365 {
70366 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70367 }
70368@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70369 if (!page)
70370 return NULL;
70371
70372+ set_slob_page(page);
70373 return page_address(page);
70374 }
70375
70376@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70377 if (!b)
70378 return NULL;
70379 sp = slob_page(b);
70380- set_slob_page(sp);
70381
70382 spin_lock_irqsave(&slob_lock, flags);
70383 sp->units = SLOB_UNITS(PAGE_SIZE);
70384 sp->free = b;
70385+ sp->size = 0;
70386 INIT_LIST_HEAD(&sp->list);
70387 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70388 set_slob_page_free(sp, slob_list);
70389@@ -476,10 +479,9 @@ out:
70390 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70391 */
70392
70393-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70394+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70395 {
70396- unsigned int *m;
70397- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70398+ slob_t *m;
70399 void *ret;
70400
70401 gfp &= gfp_allowed_mask;
70402@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70403
70404 if (!m)
70405 return NULL;
70406- *m = size;
70407+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70408+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70409+ m[0].units = size;
70410+ m[1].units = align;
70411 ret = (void *)m + align;
70412
70413 trace_kmalloc_node(_RET_IP_, ret,
70414@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70415 gfp |= __GFP_COMP;
70416 ret = slob_new_pages(gfp, order, node);
70417 if (ret) {
70418- struct page *page;
70419- page = virt_to_page(ret);
70420- page->private = size;
70421+ struct slob_page *sp;
70422+ sp = slob_page(ret);
70423+ sp->size = size;
70424 }
70425
70426 trace_kmalloc_node(_RET_IP_, ret,
70427 size, PAGE_SIZE << order, gfp, node);
70428 }
70429
70430- kmemleak_alloc(ret, size, 1, gfp);
70431+ return ret;
70432+}
70433+
70434+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70435+{
70436+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70437+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70438+
70439+ if (!ZERO_OR_NULL_PTR(ret))
70440+ kmemleak_alloc(ret, size, 1, gfp);
70441 return ret;
70442 }
70443 EXPORT_SYMBOL(__kmalloc_node);
70444@@ -533,13 +547,92 @@ void kfree(const void *block)
70445 sp = slob_page(block);
70446 if (is_slob_page(sp)) {
70447 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70448- unsigned int *m = (unsigned int *)(block - align);
70449- slob_free(m, *m + align);
70450- } else
70451+ slob_t *m = (slob_t *)(block - align);
70452+ slob_free(m, m[0].units + align);
70453+ } else {
70454+ clear_slob_page(sp);
70455+ free_slob_page(sp);
70456+ sp->size = 0;
70457 put_page(&sp->page);
70458+ }
70459 }
70460 EXPORT_SYMBOL(kfree);
70461
70462+void check_object_size(const void *ptr, unsigned long n, bool to)
70463+{
70464+
70465+#ifdef CONFIG_PAX_USERCOPY
70466+ struct slob_page *sp;
70467+ const slob_t *free;
70468+ const void *base;
70469+ unsigned long flags;
70470+ const char *type;
70471+
70472+ if (!n)
70473+ return;
70474+
70475+ type = "<null>";
70476+ if (ZERO_OR_NULL_PTR(ptr))
70477+ goto report;
70478+
70479+ if (!virt_addr_valid(ptr))
70480+ return;
70481+
70482+ type = "<process stack>";
70483+ sp = slob_page(ptr);
70484+ if (!PageSlab((struct page*)sp)) {
70485+ if (object_is_on_stack(ptr, n) == -1)
70486+ goto report;
70487+ return;
70488+ }
70489+
70490+ type = "<slob>";
70491+ if (sp->size) {
70492+ base = page_address(&sp->page);
70493+ if (base <= ptr && n <= sp->size - (ptr - base))
70494+ return;
70495+ goto report;
70496+ }
70497+
70498+ /* some tricky double walking to find the chunk */
70499+ spin_lock_irqsave(&slob_lock, flags);
70500+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70501+ free = sp->free;
70502+
70503+ while (!slob_last(free) && (void *)free <= ptr) {
70504+ base = free + slob_units(free);
70505+ free = slob_next(free);
70506+ }
70507+
70508+ while (base < (void *)free) {
70509+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70510+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70511+ int offset;
70512+
70513+ if (ptr < base + align)
70514+ break;
70515+
70516+ offset = ptr - base - align;
70517+ if (offset >= m) {
70518+ base += size;
70519+ continue;
70520+ }
70521+
70522+ if (n > m - offset)
70523+ break;
70524+
70525+ spin_unlock_irqrestore(&slob_lock, flags);
70526+ return;
70527+ }
70528+
70529+ spin_unlock_irqrestore(&slob_lock, flags);
70530+report:
70531+ pax_report_usercopy(ptr, n, to, type);
70532+#endif
70533+
70534+}
70535+EXPORT_SYMBOL(check_object_size);
70536+
70537 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70538 size_t ksize(const void *block)
70539 {
70540@@ -552,10 +645,10 @@ size_t ksize(const void *block)
70541 sp = slob_page(block);
70542 if (is_slob_page(sp)) {
70543 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70544- unsigned int *m = (unsigned int *)(block - align);
70545- return SLOB_UNITS(*m) * SLOB_UNIT;
70546+ slob_t *m = (slob_t *)(block - align);
70547+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70548 } else
70549- return sp->page.private;
70550+ return sp->size;
70551 }
70552 EXPORT_SYMBOL(ksize);
70553
70554@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
70555 {
70556 struct kmem_cache *c;
70557
70558+#ifdef CONFIG_PAX_USERCOPY
70559+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70560+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70561+#else
70562 c = slob_alloc(sizeof(struct kmem_cache),
70563 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70564+#endif
70565
70566 if (c) {
70567 c->name = name;
70568@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
70569
70570 lockdep_trace_alloc(flags);
70571
70572+#ifdef CONFIG_PAX_USERCOPY
70573+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70574+#else
70575 if (c->size < PAGE_SIZE) {
70576 b = slob_alloc(c->size, flags, c->align, node);
70577 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70578 SLOB_UNITS(c->size) * SLOB_UNIT,
70579 flags, node);
70580 } else {
70581+ struct slob_page *sp;
70582+
70583 b = slob_new_pages(flags, get_order(c->size), node);
70584+ sp = slob_page(b);
70585+ sp->size = c->size;
70586 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70587 PAGE_SIZE << get_order(c->size),
70588 flags, node);
70589 }
70590+#endif
70591
70592 if (c->ctor)
70593 c->ctor(b);
70594@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70595
70596 static void __kmem_cache_free(void *b, int size)
70597 {
70598- if (size < PAGE_SIZE)
70599+ struct slob_page *sp = slob_page(b);
70600+
70601+ if (is_slob_page(sp))
70602 slob_free(b, size);
70603- else
70604+ else {
70605+ clear_slob_page(sp);
70606+ free_slob_page(sp);
70607+ sp->size = 0;
70608 slob_free_pages(b, get_order(size));
70609+ }
70610 }
70611
70612 static void kmem_rcu_free(struct rcu_head *head)
70613@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
70614
70615 void kmem_cache_free(struct kmem_cache *c, void *b)
70616 {
70617+ int size = c->size;
70618+
70619+#ifdef CONFIG_PAX_USERCOPY
70620+ if (size + c->align < PAGE_SIZE) {
70621+ size += c->align;
70622+ b -= c->align;
70623+ }
70624+#endif
70625+
70626 kmemleak_free_recursive(b, c->flags);
70627 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70628 struct slob_rcu *slob_rcu;
70629- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70630- slob_rcu->size = c->size;
70631+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70632+ slob_rcu->size = size;
70633 call_rcu(&slob_rcu->head, kmem_rcu_free);
70634 } else {
70635- __kmem_cache_free(b, c->size);
70636+ __kmem_cache_free(b, size);
70637 }
70638
70639+#ifdef CONFIG_PAX_USERCOPY
70640+ trace_kfree(_RET_IP_, b);
70641+#else
70642 trace_kmem_cache_free(_RET_IP_, b);
70643+#endif
70644+
70645 }
70646 EXPORT_SYMBOL(kmem_cache_free);
70647
70648diff --git a/mm/slub.c b/mm/slub.c
70649index 1a919f0..1739c9b 100644
70650--- a/mm/slub.c
70651+++ b/mm/slub.c
70652@@ -208,7 +208,7 @@ struct track {
70653
70654 enum track_item { TRACK_ALLOC, TRACK_FREE };
70655
70656-#ifdef CONFIG_SYSFS
70657+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70658 static int sysfs_slab_add(struct kmem_cache *);
70659 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70660 static void sysfs_slab_remove(struct kmem_cache *);
70661@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
70662 if (!t->addr)
70663 return;
70664
70665- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70666+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70667 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70668 #ifdef CONFIG_STACKTRACE
70669 {
70670@@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
70671
70672 page = virt_to_head_page(x);
70673
70674+ BUG_ON(!PageSlab(page));
70675+
70676 slab_free(s, page, x, _RET_IP_);
70677
70678 trace_kmem_cache_free(_RET_IP_, x);
70679@@ -2592,7 +2594,7 @@ static int slub_min_objects;
70680 * Merge control. If this is set then no merging of slab caches will occur.
70681 * (Could be removed. This was introduced to pacify the merge skeptics.)
70682 */
70683-static int slub_nomerge;
70684+static int slub_nomerge = 1;
70685
70686 /*
70687 * Calculate the order of allocation given an slab object size.
70688@@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
70689 else
70690 s->cpu_partial = 30;
70691
70692- s->refcount = 1;
70693+ atomic_set(&s->refcount, 1);
70694 #ifdef CONFIG_NUMA
70695 s->remote_node_defrag_ratio = 1000;
70696 #endif
70697@@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
70698 void kmem_cache_destroy(struct kmem_cache *s)
70699 {
70700 down_write(&slub_lock);
70701- s->refcount--;
70702- if (!s->refcount) {
70703+ if (atomic_dec_and_test(&s->refcount)) {
70704 list_del(&s->list);
70705 up_write(&slub_lock);
70706 if (kmem_cache_close(s)) {
70707@@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
70708 EXPORT_SYMBOL(__kmalloc_node);
70709 #endif
70710
70711+void check_object_size(const void *ptr, unsigned long n, bool to)
70712+{
70713+
70714+#ifdef CONFIG_PAX_USERCOPY
70715+ struct page *page;
70716+ struct kmem_cache *s = NULL;
70717+ unsigned long offset;
70718+ const char *type;
70719+
70720+ if (!n)
70721+ return;
70722+
70723+ type = "<null>";
70724+ if (ZERO_OR_NULL_PTR(ptr))
70725+ goto report;
70726+
70727+ if (!virt_addr_valid(ptr))
70728+ return;
70729+
70730+ page = virt_to_head_page(ptr);
70731+
70732+ type = "<process stack>";
70733+ if (!PageSlab(page)) {
70734+ if (object_is_on_stack(ptr, n) == -1)
70735+ goto report;
70736+ return;
70737+ }
70738+
70739+ s = page->slab;
70740+ type = s->name;
70741+ if (!(s->flags & SLAB_USERCOPY))
70742+ goto report;
70743+
70744+ offset = (ptr - page_address(page)) % s->size;
70745+ if (offset <= s->objsize && n <= s->objsize - offset)
70746+ return;
70747+
70748+report:
70749+ pax_report_usercopy(ptr, n, to, type);
70750+#endif
70751+
70752+}
70753+EXPORT_SYMBOL(check_object_size);
70754+
70755 size_t ksize(const void *object)
70756 {
70757 struct page *page;
70758@@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
70759 int node;
70760
70761 list_add(&s->list, &slab_caches);
70762- s->refcount = -1;
70763+ atomic_set(&s->refcount, -1);
70764
70765 for_each_node_state(node, N_NORMAL_MEMORY) {
70766 struct kmem_cache_node *n = get_node(s, node);
70767@@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
70768
70769 /* Caches that are not of the two-to-the-power-of size */
70770 if (KMALLOC_MIN_SIZE <= 32) {
70771- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70772+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70773 caches++;
70774 }
70775
70776 if (KMALLOC_MIN_SIZE <= 64) {
70777- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70778+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70779 caches++;
70780 }
70781
70782 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70783- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70784+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70785 caches++;
70786 }
70787
70788@@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
70789 /*
70790 * We may have set a slab to be unmergeable during bootstrap.
70791 */
70792- if (s->refcount < 0)
70793+ if (atomic_read(&s->refcount) < 0)
70794 return 1;
70795
70796 return 0;
70797@@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
70798 down_write(&slub_lock);
70799 s = find_mergeable(size, align, flags, name, ctor);
70800 if (s) {
70801- s->refcount++;
70802+ atomic_inc(&s->refcount);
70803 /*
70804 * Adjust the object sizes so that we clear
70805 * the complete object on kzalloc.
70806@@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
70807 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70808
70809 if (sysfs_slab_alias(s, name)) {
70810- s->refcount--;
70811+ atomic_dec(&s->refcount);
70812 goto err;
70813 }
70814 up_write(&slub_lock);
70815@@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
70816 }
70817 #endif
70818
70819-#ifdef CONFIG_SYSFS
70820+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70821 static int count_inuse(struct page *page)
70822 {
70823 return page->inuse;
70824@@ -4410,12 +4455,12 @@ static void resiliency_test(void)
70825 validate_slab_cache(kmalloc_caches[9]);
70826 }
70827 #else
70828-#ifdef CONFIG_SYSFS
70829+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70830 static void resiliency_test(void) {};
70831 #endif
70832 #endif
70833
70834-#ifdef CONFIG_SYSFS
70835+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70836 enum slab_stat_type {
70837 SL_ALL, /* All slabs */
70838 SL_PARTIAL, /* Only partially allocated slabs */
70839@@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
70840
70841 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70842 {
70843- return sprintf(buf, "%d\n", s->refcount - 1);
70844+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70845 }
70846 SLAB_ATTR_RO(aliases);
70847
70848@@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
70849 return name;
70850 }
70851
70852+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70853 static int sysfs_slab_add(struct kmem_cache *s)
70854 {
70855 int err;
70856@@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
70857 kobject_del(&s->kobj);
70858 kobject_put(&s->kobj);
70859 }
70860+#endif
70861
70862 /*
70863 * Need to buffer aliases during bootup until sysfs becomes
70864@@ -5298,6 +5345,7 @@ struct saved_alias {
70865
70866 static struct saved_alias *alias_list;
70867
70868+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70869 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70870 {
70871 struct saved_alias *al;
70872@@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70873 alias_list = al;
70874 return 0;
70875 }
70876+#endif
70877
70878 static int __init slab_sysfs_init(void)
70879 {
70880diff --git a/mm/swap.c b/mm/swap.c
70881index a91caf7..b887e735 100644
70882--- a/mm/swap.c
70883+++ b/mm/swap.c
70884@@ -31,6 +31,7 @@
70885 #include <linux/backing-dev.h>
70886 #include <linux/memcontrol.h>
70887 #include <linux/gfp.h>
70888+#include <linux/hugetlb.h>
70889
70890 #include "internal.h"
70891
70892@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
70893
70894 __page_cache_release(page);
70895 dtor = get_compound_page_dtor(page);
70896+ if (!PageHuge(page))
70897+ BUG_ON(dtor != free_compound_page);
70898 (*dtor)(page);
70899 }
70900
70901diff --git a/mm/swapfile.c b/mm/swapfile.c
70902index b1cd120..aaae885 100644
70903--- a/mm/swapfile.c
70904+++ b/mm/swapfile.c
70905@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
70906
70907 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
70908 /* Activity counter to indicate that a swapon or swapoff has occurred */
70909-static atomic_t proc_poll_event = ATOMIC_INIT(0);
70910+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
70911
70912 static inline unsigned char swap_count(unsigned char ent)
70913 {
70914@@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
70915 }
70916 filp_close(swap_file, NULL);
70917 err = 0;
70918- atomic_inc(&proc_poll_event);
70919+ atomic_inc_unchecked(&proc_poll_event);
70920 wake_up_interruptible(&proc_poll_wait);
70921
70922 out_dput:
70923@@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
70924
70925 poll_wait(file, &proc_poll_wait, wait);
70926
70927- if (seq->poll_event != atomic_read(&proc_poll_event)) {
70928- seq->poll_event = atomic_read(&proc_poll_event);
70929+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
70930+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
70931 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
70932 }
70933
70934@@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
70935 return ret;
70936
70937 seq = file->private_data;
70938- seq->poll_event = atomic_read(&proc_poll_event);
70939+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
70940 return 0;
70941 }
70942
70943@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
70944 (p->flags & SWP_DISCARDABLE) ? "D" : "");
70945
70946 mutex_unlock(&swapon_mutex);
70947- atomic_inc(&proc_poll_event);
70948+ atomic_inc_unchecked(&proc_poll_event);
70949 wake_up_interruptible(&proc_poll_wait);
70950
70951 if (S_ISREG(inode->i_mode))
70952diff --git a/mm/util.c b/mm/util.c
70953index 136ac4f..5117eef 100644
70954--- a/mm/util.c
70955+++ b/mm/util.c
70956@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
70957 * allocated buffer. Use this if you don't want to free the buffer immediately
70958 * like, for example, with RCU.
70959 */
70960+#undef __krealloc
70961 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
70962 {
70963 void *ret;
70964@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
70965 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
70966 * %NULL pointer, the object pointed to is freed.
70967 */
70968+#undef krealloc
70969 void *krealloc(const void *p, size_t new_size, gfp_t flags)
70970 {
70971 void *ret;
70972@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
70973 void arch_pick_mmap_layout(struct mm_struct *mm)
70974 {
70975 mm->mmap_base = TASK_UNMAPPED_BASE;
70976+
70977+#ifdef CONFIG_PAX_RANDMMAP
70978+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70979+ mm->mmap_base += mm->delta_mmap;
70980+#endif
70981+
70982 mm->get_unmapped_area = arch_get_unmapped_area;
70983 mm->unmap_area = arch_unmap_area;
70984 }
70985diff --git a/mm/vmalloc.c b/mm/vmalloc.c
70986index 27be2f0..0aef2c2 100644
70987--- a/mm/vmalloc.c
70988+++ b/mm/vmalloc.c
70989@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
70990
70991 pte = pte_offset_kernel(pmd, addr);
70992 do {
70993- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70994- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70995+
70996+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70997+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70998+ BUG_ON(!pte_exec(*pte));
70999+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71000+ continue;
71001+ }
71002+#endif
71003+
71004+ {
71005+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71006+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71007+ }
71008 } while (pte++, addr += PAGE_SIZE, addr != end);
71009 }
71010
71011@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71012 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71013 {
71014 pte_t *pte;
71015+ int ret = -ENOMEM;
71016
71017 /*
71018 * nr is a running index into the array which helps higher level
71019@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71020 pte = pte_alloc_kernel(pmd, addr);
71021 if (!pte)
71022 return -ENOMEM;
71023+
71024+ pax_open_kernel();
71025 do {
71026 struct page *page = pages[*nr];
71027
71028- if (WARN_ON(!pte_none(*pte)))
71029- return -EBUSY;
71030- if (WARN_ON(!page))
71031- return -ENOMEM;
71032+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71033+ if (pgprot_val(prot) & _PAGE_NX)
71034+#endif
71035+
71036+ if (WARN_ON(!pte_none(*pte))) {
71037+ ret = -EBUSY;
71038+ goto out;
71039+ }
71040+ if (WARN_ON(!page)) {
71041+ ret = -ENOMEM;
71042+ goto out;
71043+ }
71044 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71045 (*nr)++;
71046 } while (pte++, addr += PAGE_SIZE, addr != end);
71047- return 0;
71048+ ret = 0;
71049+out:
71050+ pax_close_kernel();
71051+ return ret;
71052 }
71053
71054 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71055@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71056 * and fall back on vmalloc() if that fails. Others
71057 * just put it in the vmalloc space.
71058 */
71059-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71060+#ifdef CONFIG_MODULES
71061+#ifdef MODULES_VADDR
71062 unsigned long addr = (unsigned long)x;
71063 if (addr >= MODULES_VADDR && addr < MODULES_END)
71064 return 1;
71065 #endif
71066+
71067+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71068+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71069+ return 1;
71070+#endif
71071+
71072+#endif
71073+
71074 return is_vmalloc_addr(x);
71075 }
71076
71077@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71078
71079 if (!pgd_none(*pgd)) {
71080 pud_t *pud = pud_offset(pgd, addr);
71081+#ifdef CONFIG_X86
71082+ if (!pud_large(*pud))
71083+#endif
71084 if (!pud_none(*pud)) {
71085 pmd_t *pmd = pmd_offset(pud, addr);
71086+#ifdef CONFIG_X86
71087+ if (!pmd_large(*pmd))
71088+#endif
71089 if (!pmd_none(*pmd)) {
71090 pte_t *ptep, pte;
71091
71092@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71093 struct vm_struct *area;
71094
71095 BUG_ON(in_interrupt());
71096+
71097+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71098+ if (flags & VM_KERNEXEC) {
71099+ if (start != VMALLOC_START || end != VMALLOC_END)
71100+ return NULL;
71101+ start = (unsigned long)MODULES_EXEC_VADDR;
71102+ end = (unsigned long)MODULES_EXEC_END;
71103+ }
71104+#endif
71105+
71106 if (flags & VM_IOREMAP) {
71107 int bit = fls(size);
71108
71109@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71110 if (count > totalram_pages)
71111 return NULL;
71112
71113+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71114+ if (!(pgprot_val(prot) & _PAGE_NX))
71115+ flags |= VM_KERNEXEC;
71116+#endif
71117+
71118 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71119 __builtin_return_address(0));
71120 if (!area)
71121@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71122 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71123 goto fail;
71124
71125+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71126+ if (!(pgprot_val(prot) & _PAGE_NX))
71127+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71128+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71129+ else
71130+#endif
71131+
71132 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71133 start, end, node, gfp_mask, caller);
71134 if (!area)
71135@@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71136 gfp_mask, prot, node, caller);
71137 }
71138
71139+#undef __vmalloc
71140 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71141 {
71142 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71143@@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71144 * For tight control over page level allocator and protection flags
71145 * use __vmalloc() instead.
71146 */
71147+#undef vmalloc
71148 void *vmalloc(unsigned long size)
71149 {
71150 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71151@@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71152 * For tight control over page level allocator and protection flags
71153 * use __vmalloc() instead.
71154 */
71155+#undef vzalloc
71156 void *vzalloc(unsigned long size)
71157 {
71158 return __vmalloc_node_flags(size, -1,
71159@@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71160 * The resulting memory area is zeroed so it can be mapped to userspace
71161 * without leaking data.
71162 */
71163+#undef vmalloc_user
71164 void *vmalloc_user(unsigned long size)
71165 {
71166 struct vm_struct *area;
71167@@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71168 * For tight control over page level allocator and protection flags
71169 * use __vmalloc() instead.
71170 */
71171+#undef vmalloc_node
71172 void *vmalloc_node(unsigned long size, int node)
71173 {
71174 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71175@@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71176 * For tight control over page level allocator and protection flags
71177 * use __vmalloc_node() instead.
71178 */
71179+#undef vzalloc_node
71180 void *vzalloc_node(unsigned long size, int node)
71181 {
71182 return __vmalloc_node_flags(size, node,
71183@@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71184 * For tight control over page level allocator and protection flags
71185 * use __vmalloc() instead.
71186 */
71187-
71188+#undef vmalloc_exec
71189 void *vmalloc_exec(unsigned long size)
71190 {
71191- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71192+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71193 -1, __builtin_return_address(0));
71194 }
71195
71196@@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71197 * Allocate enough 32bit PA addressable pages to cover @size from the
71198 * page level allocator and map them into contiguous kernel virtual space.
71199 */
71200+#undef vmalloc_32
71201 void *vmalloc_32(unsigned long size)
71202 {
71203 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71204@@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71205 * The resulting memory area is 32bit addressable and zeroed so it can be
71206 * mapped to userspace without leaking data.
71207 */
71208+#undef vmalloc_32_user
71209 void *vmalloc_32_user(unsigned long size)
71210 {
71211 struct vm_struct *area;
71212@@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71213 unsigned long uaddr = vma->vm_start;
71214 unsigned long usize = vma->vm_end - vma->vm_start;
71215
71216+ BUG_ON(vma->vm_mirror);
71217+
71218 if ((PAGE_SIZE-1) & (unsigned long)addr)
71219 return -EINVAL;
71220
71221diff --git a/mm/vmstat.c b/mm/vmstat.c
71222index 8fd603b..cf0d930 100644
71223--- a/mm/vmstat.c
71224+++ b/mm/vmstat.c
71225@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71226 *
71227 * vm_stat contains the global counters
71228 */
71229-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71230+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71231 EXPORT_SYMBOL(vm_stat);
71232
71233 #ifdef CONFIG_SMP
71234@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71235 v = p->vm_stat_diff[i];
71236 p->vm_stat_diff[i] = 0;
71237 local_irq_restore(flags);
71238- atomic_long_add(v, &zone->vm_stat[i]);
71239+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71240 global_diff[i] += v;
71241 #ifdef CONFIG_NUMA
71242 /* 3 seconds idle till flush */
71243@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71244
71245 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71246 if (global_diff[i])
71247- atomic_long_add(global_diff[i], &vm_stat[i]);
71248+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71249 }
71250
71251 #endif
71252@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71253 start_cpu_timer(cpu);
71254 #endif
71255 #ifdef CONFIG_PROC_FS
71256- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71257- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71258- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71259- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71260+ {
71261+ mode_t gr_mode = S_IRUGO;
71262+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71263+ gr_mode = S_IRUSR;
71264+#endif
71265+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71266+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71267+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71268+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71269+#else
71270+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71271+#endif
71272+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71273+ }
71274 #endif
71275 return 0;
71276 }
71277diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71278index 5471628..cef8398 100644
71279--- a/net/8021q/vlan.c
71280+++ b/net/8021q/vlan.c
71281@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71282 err = -EPERM;
71283 if (!capable(CAP_NET_ADMIN))
71284 break;
71285- if ((args.u.name_type >= 0) &&
71286- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71287+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71288 struct vlan_net *vn;
71289
71290 vn = net_generic(net, vlan_net_id);
71291diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71292index fdfdb57..38d368c 100644
71293--- a/net/9p/trans_fd.c
71294+++ b/net/9p/trans_fd.c
71295@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71296 oldfs = get_fs();
71297 set_fs(get_ds());
71298 /* The cast to a user pointer is valid due to the set_fs() */
71299- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71300+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71301 set_fs(oldfs);
71302
71303 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71304diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71305index f41f026..fe76ea8 100644
71306--- a/net/atm/atm_misc.c
71307+++ b/net/atm/atm_misc.c
71308@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71309 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71310 return 1;
71311 atm_return(vcc, truesize);
71312- atomic_inc(&vcc->stats->rx_drop);
71313+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71314 return 0;
71315 }
71316 EXPORT_SYMBOL(atm_charge);
71317@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71318 }
71319 }
71320 atm_return(vcc, guess);
71321- atomic_inc(&vcc->stats->rx_drop);
71322+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71323 return NULL;
71324 }
71325 EXPORT_SYMBOL(atm_alloc_charge);
71326@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71327
71328 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71329 {
71330-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71331+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71332 __SONET_ITEMS
71333 #undef __HANDLE_ITEM
71334 }
71335@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71336
71337 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71338 {
71339-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71340+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71341 __SONET_ITEMS
71342 #undef __HANDLE_ITEM
71343 }
71344diff --git a/net/atm/lec.h b/net/atm/lec.h
71345index dfc0719..47c5322 100644
71346--- a/net/atm/lec.h
71347+++ b/net/atm/lec.h
71348@@ -48,7 +48,7 @@ struct lane2_ops {
71349 const u8 *tlvs, u32 sizeoftlvs);
71350 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71351 const u8 *tlvs, u32 sizeoftlvs);
71352-};
71353+} __no_const;
71354
71355 /*
71356 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71357diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71358index 0919a88..a23d54e 100644
71359--- a/net/atm/mpc.h
71360+++ b/net/atm/mpc.h
71361@@ -33,7 +33,7 @@ struct mpoa_client {
71362 struct mpc_parameters parameters; /* parameters for this client */
71363
71364 const struct net_device_ops *old_ops;
71365- struct net_device_ops new_ops;
71366+ net_device_ops_no_const new_ops;
71367 };
71368
71369
71370diff --git a/net/atm/proc.c b/net/atm/proc.c
71371index 0d020de..011c7bb 100644
71372--- a/net/atm/proc.c
71373+++ b/net/atm/proc.c
71374@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71375 const struct k_atm_aal_stats *stats)
71376 {
71377 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71378- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71379- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71380- atomic_read(&stats->rx_drop));
71381+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71382+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71383+ atomic_read_unchecked(&stats->rx_drop));
71384 }
71385
71386 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71387diff --git a/net/atm/resources.c b/net/atm/resources.c
71388index 23f45ce..c748f1a 100644
71389--- a/net/atm/resources.c
71390+++ b/net/atm/resources.c
71391@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71392 static void copy_aal_stats(struct k_atm_aal_stats *from,
71393 struct atm_aal_stats *to)
71394 {
71395-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71396+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71397 __AAL_STAT_ITEMS
71398 #undef __HANDLE_ITEM
71399 }
71400@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71401 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71402 struct atm_aal_stats *to)
71403 {
71404-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71405+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71406 __AAL_STAT_ITEMS
71407 #undef __HANDLE_ITEM
71408 }
71409diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71410index 3512e25..2b33401 100644
71411--- a/net/batman-adv/bat_iv_ogm.c
71412+++ b/net/batman-adv/bat_iv_ogm.c
71413@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71414
71415 /* change sequence number to network order */
71416 batman_ogm_packet->seqno =
71417- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71418+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71419
71420 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71421 batman_ogm_packet->tt_crc = htons((uint16_t)
71422@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71423 else
71424 batman_ogm_packet->gw_flags = NO_FLAGS;
71425
71426- atomic_inc(&hard_iface->seqno);
71427+ atomic_inc_unchecked(&hard_iface->seqno);
71428
71429 slide_own_bcast_window(hard_iface);
71430 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71431@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71432 return;
71433
71434 /* could be changed by schedule_own_packet() */
71435- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71436+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71437
71438 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71439
71440diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71441index 7704df4..beb4e16 100644
71442--- a/net/batman-adv/hard-interface.c
71443+++ b/net/batman-adv/hard-interface.c
71444@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71445 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71446 dev_add_pack(&hard_iface->batman_adv_ptype);
71447
71448- atomic_set(&hard_iface->seqno, 1);
71449- atomic_set(&hard_iface->frag_seqno, 1);
71450+ atomic_set_unchecked(&hard_iface->seqno, 1);
71451+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71452 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71453 hard_iface->net_dev->name);
71454
71455diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
71456index f9cc957..efd9dae 100644
71457--- a/net/batman-adv/soft-interface.c
71458+++ b/net/batman-adv/soft-interface.c
71459@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
71460
71461 /* set broadcast sequence number */
71462 bcast_packet->seqno =
71463- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71464+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71465
71466 add_bcast_packet_to_list(bat_priv, skb, 1);
71467
71468@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
71469 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71470
71471 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71472- atomic_set(&bat_priv->bcast_seqno, 1);
71473+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71474 atomic_set(&bat_priv->ttvn, 0);
71475 atomic_set(&bat_priv->tt_local_changes, 0);
71476 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71477diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
71478index ab8d0fe..ceba3fd 100644
71479--- a/net/batman-adv/types.h
71480+++ b/net/batman-adv/types.h
71481@@ -38,8 +38,8 @@ struct hard_iface {
71482 int16_t if_num;
71483 char if_status;
71484 struct net_device *net_dev;
71485- atomic_t seqno;
71486- atomic_t frag_seqno;
71487+ atomic_unchecked_t seqno;
71488+ atomic_unchecked_t frag_seqno;
71489 unsigned char *packet_buff;
71490 int packet_len;
71491 struct kobject *hardif_obj;
71492@@ -154,7 +154,7 @@ struct bat_priv {
71493 atomic_t orig_interval; /* uint */
71494 atomic_t hop_penalty; /* uint */
71495 atomic_t log_level; /* uint */
71496- atomic_t bcast_seqno;
71497+ atomic_unchecked_t bcast_seqno;
71498 atomic_t bcast_queue_left;
71499 atomic_t batman_queue_left;
71500 atomic_t ttvn; /* translation table version number */
71501diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
71502index 07d1c1d..7e9bea9 100644
71503--- a/net/batman-adv/unicast.c
71504+++ b/net/batman-adv/unicast.c
71505@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
71506 frag1->flags = UNI_FRAG_HEAD | large_tail;
71507 frag2->flags = large_tail;
71508
71509- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71510+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71511 frag1->seqno = htons(seqno - 1);
71512 frag2->seqno = htons(seqno);
71513
71514diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
71515index c1c597e..05ebb40 100644
71516--- a/net/bluetooth/hci_conn.c
71517+++ b/net/bluetooth/hci_conn.c
71518@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
71519 memset(&cp, 0, sizeof(cp));
71520
71521 cp.handle = cpu_to_le16(conn->handle);
71522- memcpy(cp.ltk, ltk, sizeof(ltk));
71523+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71524
71525 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71526 }
71527diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
71528index 17b5b1c..826d872 100644
71529--- a/net/bluetooth/l2cap_core.c
71530+++ b/net/bluetooth/l2cap_core.c
71531@@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
71532 break;
71533
71534 case L2CAP_CONF_RFC:
71535- if (olen == sizeof(rfc))
71536- memcpy(&rfc, (void *)val, olen);
71537+ if (olen != sizeof(rfc))
71538+ break;
71539+
71540+ memcpy(&rfc, (void *)val, olen);
71541
71542 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
71543 rfc.mode != chan->mode)
71544@@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
71545
71546 switch (type) {
71547 case L2CAP_CONF_RFC:
71548- if (olen == sizeof(rfc))
71549- memcpy(&rfc, (void *)val, olen);
71550+ if (olen != sizeof(rfc))
71551+ break;
71552+
71553+ memcpy(&rfc, (void *)val, olen);
71554 goto done;
71555 }
71556 }
71557diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
71558index a5f4e57..910ee6d 100644
71559--- a/net/bridge/br_multicast.c
71560+++ b/net/bridge/br_multicast.c
71561@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
71562 nexthdr = ip6h->nexthdr;
71563 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71564
71565- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71566+ if (nexthdr != IPPROTO_ICMPV6)
71567 return 0;
71568
71569 /* Okay, we found ICMPv6 header */
71570diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
71571index 5864cc4..121f3a3 100644
71572--- a/net/bridge/netfilter/ebtables.c
71573+++ b/net/bridge/netfilter/ebtables.c
71574@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
71575 tmp.valid_hooks = t->table->valid_hooks;
71576 }
71577 mutex_unlock(&ebt_mutex);
71578- if (copy_to_user(user, &tmp, *len) != 0){
71579+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71580 BUGPRINT("c2u Didn't work\n");
71581 ret = -EFAULT;
71582 break;
71583diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
71584index a986280..13444a1 100644
71585--- a/net/caif/caif_socket.c
71586+++ b/net/caif/caif_socket.c
71587@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71588 #ifdef CONFIG_DEBUG_FS
71589 struct debug_fs_counter {
71590 atomic_t caif_nr_socks;
71591- atomic_t caif_sock_create;
71592- atomic_t num_connect_req;
71593- atomic_t num_connect_resp;
71594- atomic_t num_connect_fail_resp;
71595- atomic_t num_disconnect;
71596- atomic_t num_remote_shutdown_ind;
71597- atomic_t num_tx_flow_off_ind;
71598- atomic_t num_tx_flow_on_ind;
71599- atomic_t num_rx_flow_off;
71600- atomic_t num_rx_flow_on;
71601+ atomic_unchecked_t caif_sock_create;
71602+ atomic_unchecked_t num_connect_req;
71603+ atomic_unchecked_t num_connect_resp;
71604+ atomic_unchecked_t num_connect_fail_resp;
71605+ atomic_unchecked_t num_disconnect;
71606+ atomic_unchecked_t num_remote_shutdown_ind;
71607+ atomic_unchecked_t num_tx_flow_off_ind;
71608+ atomic_unchecked_t num_tx_flow_on_ind;
71609+ atomic_unchecked_t num_rx_flow_off;
71610+ atomic_unchecked_t num_rx_flow_on;
71611 };
71612 static struct debug_fs_counter cnt;
71613 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71614+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71615 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71616 #else
71617 #define dbfs_atomic_inc(v) 0
71618@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
71619 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71620 sk_rcvbuf_lowwater(cf_sk));
71621 set_rx_flow_off(cf_sk);
71622- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71623+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71624 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71625 }
71626
71627@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
71628 set_rx_flow_off(cf_sk);
71629 if (net_ratelimit())
71630 pr_debug("sending flow OFF due to rmem_schedule\n");
71631- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71632+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71633 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71634 }
71635 skb->dev = NULL;
71636@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
71637 switch (flow) {
71638 case CAIF_CTRLCMD_FLOW_ON_IND:
71639 /* OK from modem to start sending again */
71640- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71641+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71642 set_tx_flow_on(cf_sk);
71643 cf_sk->sk.sk_state_change(&cf_sk->sk);
71644 break;
71645
71646 case CAIF_CTRLCMD_FLOW_OFF_IND:
71647 /* Modem asks us to shut up */
71648- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71649+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71650 set_tx_flow_off(cf_sk);
71651 cf_sk->sk.sk_state_change(&cf_sk->sk);
71652 break;
71653@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
71654 /* We're now connected */
71655 caif_client_register_refcnt(&cf_sk->layer,
71656 cfsk_hold, cfsk_put);
71657- dbfs_atomic_inc(&cnt.num_connect_resp);
71658+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71659 cf_sk->sk.sk_state = CAIF_CONNECTED;
71660 set_tx_flow_on(cf_sk);
71661 cf_sk->sk.sk_state_change(&cf_sk->sk);
71662@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
71663
71664 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71665 /* Connect request failed */
71666- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71667+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71668 cf_sk->sk.sk_err = ECONNREFUSED;
71669 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71670 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71671@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
71672
71673 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71674 /* Modem has closed this connection, or device is down. */
71675- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71676+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71677 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71678 cf_sk->sk.sk_err = ECONNRESET;
71679 set_rx_flow_on(cf_sk);
71680@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
71681 return;
71682
71683 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71684- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71685+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71686 set_rx_flow_on(cf_sk);
71687 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71688 }
71689@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
71690 /*ifindex = id of the interface.*/
71691 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71692
71693- dbfs_atomic_inc(&cnt.num_connect_req);
71694+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71695 cf_sk->layer.receive = caif_sktrecv_cb;
71696
71697 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71698@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
71699 spin_unlock_bh(&sk->sk_receive_queue.lock);
71700 sock->sk = NULL;
71701
71702- dbfs_atomic_inc(&cnt.num_disconnect);
71703+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71704
71705 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71706 if (cf_sk->debugfs_socket_dir != NULL)
71707@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
71708 cf_sk->conn_req.protocol = protocol;
71709 /* Increase the number of sockets created. */
71710 dbfs_atomic_inc(&cnt.caif_nr_socks);
71711- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71712+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71713 #ifdef CONFIG_DEBUG_FS
71714 if (!IS_ERR(debugfsdir)) {
71715
71716diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
71717index 5cf5222..6f704ad 100644
71718--- a/net/caif/cfctrl.c
71719+++ b/net/caif/cfctrl.c
71720@@ -9,6 +9,7 @@
71721 #include <linux/stddef.h>
71722 #include <linux/spinlock.h>
71723 #include <linux/slab.h>
71724+#include <linux/sched.h>
71725 #include <net/caif/caif_layer.h>
71726 #include <net/caif/cfpkt.h>
71727 #include <net/caif/cfctrl.h>
71728@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
71729 memset(&dev_info, 0, sizeof(dev_info));
71730 dev_info.id = 0xff;
71731 cfsrvl_init(&this->serv, 0, &dev_info, false);
71732- atomic_set(&this->req_seq_no, 1);
71733- atomic_set(&this->rsp_seq_no, 1);
71734+ atomic_set_unchecked(&this->req_seq_no, 1);
71735+ atomic_set_unchecked(&this->rsp_seq_no, 1);
71736 this->serv.layer.receive = cfctrl_recv;
71737 sprintf(this->serv.layer.name, "ctrl");
71738 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71739@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
71740 struct cfctrl_request_info *req)
71741 {
71742 spin_lock_bh(&ctrl->info_list_lock);
71743- atomic_inc(&ctrl->req_seq_no);
71744- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71745+ atomic_inc_unchecked(&ctrl->req_seq_no);
71746+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71747 list_add_tail(&req->list, &ctrl->list);
71748 spin_unlock_bh(&ctrl->info_list_lock);
71749 }
71750@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
71751 if (p != first)
71752 pr_warn("Requests are not received in order\n");
71753
71754- atomic_set(&ctrl->rsp_seq_no,
71755+ atomic_set_unchecked(&ctrl->rsp_seq_no,
71756 p->sequence_no);
71757 list_del(&p->list);
71758 goto out;
71759diff --git a/net/can/gw.c b/net/can/gw.c
71760index 3d79b12..8de85fa 100644
71761--- a/net/can/gw.c
71762+++ b/net/can/gw.c
71763@@ -96,7 +96,7 @@ struct cf_mod {
71764 struct {
71765 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
71766 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
71767- } csumfunc;
71768+ } __no_const csumfunc;
71769 };
71770
71771
71772diff --git a/net/compat.c b/net/compat.c
71773index 6def90e..c6992fa 100644
71774--- a/net/compat.c
71775+++ b/net/compat.c
71776@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
71777 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71778 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71779 return -EFAULT;
71780- kmsg->msg_name = compat_ptr(tmp1);
71781- kmsg->msg_iov = compat_ptr(tmp2);
71782- kmsg->msg_control = compat_ptr(tmp3);
71783+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71784+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71785+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71786 return 0;
71787 }
71788
71789@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
71790
71791 if (kern_msg->msg_namelen) {
71792 if (mode == VERIFY_READ) {
71793- int err = move_addr_to_kernel(kern_msg->msg_name,
71794+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71795 kern_msg->msg_namelen,
71796 kern_address);
71797 if (err < 0)
71798@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
71799 kern_msg->msg_name = NULL;
71800
71801 tot_len = iov_from_user_compat_to_kern(kern_iov,
71802- (struct compat_iovec __user *)kern_msg->msg_iov,
71803+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71804 kern_msg->msg_iovlen);
71805 if (tot_len >= 0)
71806 kern_msg->msg_iov = kern_iov;
71807@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
71808
71809 #define CMSG_COMPAT_FIRSTHDR(msg) \
71810 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71811- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71812+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71813 (struct compat_cmsghdr __user *)NULL)
71814
71815 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71816 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71817 (ucmlen) <= (unsigned long) \
71818 ((mhdr)->msg_controllen - \
71819- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71820+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71821
71822 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71823 struct compat_cmsghdr __user *cmsg, int cmsg_len)
71824 {
71825 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71826- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71827+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71828 msg->msg_controllen)
71829 return NULL;
71830 return (struct compat_cmsghdr __user *)ptr;
71831@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
71832 {
71833 struct compat_timeval ctv;
71834 struct compat_timespec cts[3];
71835- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71836+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71837 struct compat_cmsghdr cmhdr;
71838 int cmlen;
71839
71840@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
71841
71842 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
71843 {
71844- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71845+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71846 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
71847 int fdnum = scm->fp->count;
71848 struct file **fp = scm->fp->fp;
71849@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
71850 return -EFAULT;
71851 old_fs = get_fs();
71852 set_fs(KERNEL_DS);
71853- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
71854+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
71855 set_fs(old_fs);
71856
71857 return err;
71858@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
71859 len = sizeof(ktime);
71860 old_fs = get_fs();
71861 set_fs(KERNEL_DS);
71862- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
71863+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
71864 set_fs(old_fs);
71865
71866 if (!err) {
71867@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
71868 case MCAST_JOIN_GROUP:
71869 case MCAST_LEAVE_GROUP:
71870 {
71871- struct compat_group_req __user *gr32 = (void *)optval;
71872+ struct compat_group_req __user *gr32 = (void __user *)optval;
71873 struct group_req __user *kgr =
71874 compat_alloc_user_space(sizeof(struct group_req));
71875 u32 interface;
71876@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
71877 case MCAST_BLOCK_SOURCE:
71878 case MCAST_UNBLOCK_SOURCE:
71879 {
71880- struct compat_group_source_req __user *gsr32 = (void *)optval;
71881+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
71882 struct group_source_req __user *kgsr = compat_alloc_user_space(
71883 sizeof(struct group_source_req));
71884 u32 interface;
71885@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
71886 }
71887 case MCAST_MSFILTER:
71888 {
71889- struct compat_group_filter __user *gf32 = (void *)optval;
71890+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71891 struct group_filter __user *kgf;
71892 u32 interface, fmode, numsrc;
71893
71894@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
71895 char __user *optval, int __user *optlen,
71896 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
71897 {
71898- struct compat_group_filter __user *gf32 = (void *)optval;
71899+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71900 struct group_filter __user *kgf;
71901 int __user *koptlen;
71902 u32 interface, fmode, numsrc;
71903diff --git a/net/core/datagram.c b/net/core/datagram.c
71904index 68bbf9f..5ef0d12 100644
71905--- a/net/core/datagram.c
71906+++ b/net/core/datagram.c
71907@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
71908 }
71909
71910 kfree_skb(skb);
71911- atomic_inc(&sk->sk_drops);
71912+ atomic_inc_unchecked(&sk->sk_drops);
71913 sk_mem_reclaim_partial(sk);
71914
71915 return err;
71916diff --git a/net/core/dev.c b/net/core/dev.c
71917index 5a13edf..1bc016b 100644
71918--- a/net/core/dev.c
71919+++ b/net/core/dev.c
71920@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
71921 if (no_module && capable(CAP_NET_ADMIN))
71922 no_module = request_module("netdev-%s", name);
71923 if (no_module && capable(CAP_SYS_MODULE)) {
71924+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71925+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71926+#else
71927 if (!request_module("%s", name))
71928 pr_err("Loading kernel module for a network device "
71929 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71930 "instead\n", name);
71931+#endif
71932 }
71933 }
71934 EXPORT_SYMBOL(dev_load);
71935@@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
71936
71937 struct dev_gso_cb {
71938 void (*destructor)(struct sk_buff *skb);
71939-};
71940+} __no_const;
71941
71942 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71943
71944@@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
71945 }
71946 EXPORT_SYMBOL(netif_rx_ni);
71947
71948-static void net_tx_action(struct softirq_action *h)
71949+static void net_tx_action(void)
71950 {
71951 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71952
71953@@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
71954 }
71955 EXPORT_SYMBOL(netif_napi_del);
71956
71957-static void net_rx_action(struct softirq_action *h)
71958+static void net_rx_action(void)
71959 {
71960 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71961 unsigned long time_limit = jiffies + 2;
71962diff --git a/net/core/flow.c b/net/core/flow.c
71963index e318c7e..168b1d0 100644
71964--- a/net/core/flow.c
71965+++ b/net/core/flow.c
71966@@ -61,7 +61,7 @@ struct flow_cache {
71967 struct timer_list rnd_timer;
71968 };
71969
71970-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71971+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71972 EXPORT_SYMBOL(flow_cache_genid);
71973 static struct flow_cache flow_cache_global;
71974 static struct kmem_cache *flow_cachep __read_mostly;
71975@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
71976
71977 static int flow_entry_valid(struct flow_cache_entry *fle)
71978 {
71979- if (atomic_read(&flow_cache_genid) != fle->genid)
71980+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
71981 return 0;
71982 if (fle->object && !fle->object->ops->check(fle->object))
71983 return 0;
71984@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
71985 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
71986 fcp->hash_count++;
71987 }
71988- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
71989+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
71990 flo = fle->object;
71991 if (!flo)
71992 goto ret_object;
71993@@ -280,7 +280,7 @@ nocache:
71994 }
71995 flo = resolver(net, key, family, dir, flo, ctx);
71996 if (fle) {
71997- fle->genid = atomic_read(&flow_cache_genid);
71998+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71999 if (!IS_ERR(flo))
72000 fle->object = flo;
72001 else
72002diff --git a/net/core/iovec.c b/net/core/iovec.c
72003index c40f27e..7f49254 100644
72004--- a/net/core/iovec.c
72005+++ b/net/core/iovec.c
72006@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72007 if (m->msg_namelen) {
72008 if (mode == VERIFY_READ) {
72009 void __user *namep;
72010- namep = (void __user __force *) m->msg_name;
72011+ namep = (void __force_user *) m->msg_name;
72012 err = move_addr_to_kernel(namep, m->msg_namelen,
72013 address);
72014 if (err < 0)
72015@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72016 }
72017
72018 size = m->msg_iovlen * sizeof(struct iovec);
72019- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72020+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72021 return -EFAULT;
72022
72023 m->msg_iov = iov;
72024diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72025index 9083e82..1673203 100644
72026--- a/net/core/rtnetlink.c
72027+++ b/net/core/rtnetlink.c
72028@@ -57,7 +57,7 @@ struct rtnl_link {
72029 rtnl_doit_func doit;
72030 rtnl_dumpit_func dumpit;
72031 rtnl_calcit_func calcit;
72032-};
72033+} __no_const;
72034
72035 static DEFINE_MUTEX(rtnl_mutex);
72036 static u16 min_ifinfo_dump_size;
72037diff --git a/net/core/scm.c b/net/core/scm.c
72038index ff52ad0..aff1c0f 100644
72039--- a/net/core/scm.c
72040+++ b/net/core/scm.c
72041@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72042 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72043 {
72044 struct cmsghdr __user *cm
72045- = (__force struct cmsghdr __user *)msg->msg_control;
72046+ = (struct cmsghdr __force_user *)msg->msg_control;
72047 struct cmsghdr cmhdr;
72048 int cmlen = CMSG_LEN(len);
72049 int err;
72050@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72051 err = -EFAULT;
72052 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72053 goto out;
72054- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72055+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72056 goto out;
72057 cmlen = CMSG_SPACE(len);
72058 if (msg->msg_controllen < cmlen)
72059@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72060 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72061 {
72062 struct cmsghdr __user *cm
72063- = (__force struct cmsghdr __user*)msg->msg_control;
72064+ = (struct cmsghdr __force_user *)msg->msg_control;
72065
72066 int fdmax = 0;
72067 int fdnum = scm->fp->count;
72068@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72069 if (fdnum < fdmax)
72070 fdmax = fdnum;
72071
72072- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72073+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72074 i++, cmfptr++)
72075 {
72076 int new_fd;
72077diff --git a/net/core/sock.c b/net/core/sock.c
72078index b23f174..b9a0d26 100644
72079--- a/net/core/sock.c
72080+++ b/net/core/sock.c
72081@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72082 struct sk_buff_head *list = &sk->sk_receive_queue;
72083
72084 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72085- atomic_inc(&sk->sk_drops);
72086+ atomic_inc_unchecked(&sk->sk_drops);
72087 trace_sock_rcvqueue_full(sk, skb);
72088 return -ENOMEM;
72089 }
72090@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72091 return err;
72092
72093 if (!sk_rmem_schedule(sk, skb->truesize)) {
72094- atomic_inc(&sk->sk_drops);
72095+ atomic_inc_unchecked(&sk->sk_drops);
72096 return -ENOBUFS;
72097 }
72098
72099@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72100 skb_dst_force(skb);
72101
72102 spin_lock_irqsave(&list->lock, flags);
72103- skb->dropcount = atomic_read(&sk->sk_drops);
72104+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72105 __skb_queue_tail(list, skb);
72106 spin_unlock_irqrestore(&list->lock, flags);
72107
72108@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72109 skb->dev = NULL;
72110
72111 if (sk_rcvqueues_full(sk, skb)) {
72112- atomic_inc(&sk->sk_drops);
72113+ atomic_inc_unchecked(&sk->sk_drops);
72114 goto discard_and_relse;
72115 }
72116 if (nested)
72117@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72118 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72119 } else if (sk_add_backlog(sk, skb)) {
72120 bh_unlock_sock(sk);
72121- atomic_inc(&sk->sk_drops);
72122+ atomic_inc_unchecked(&sk->sk_drops);
72123 goto discard_and_relse;
72124 }
72125
72126@@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72127 if (len > sizeof(peercred))
72128 len = sizeof(peercred);
72129 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72130- if (copy_to_user(optval, &peercred, len))
72131+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72132 return -EFAULT;
72133 goto lenout;
72134 }
72135@@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72136 return -ENOTCONN;
72137 if (lv < len)
72138 return -EINVAL;
72139- if (copy_to_user(optval, address, len))
72140+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72141 return -EFAULT;
72142 goto lenout;
72143 }
72144@@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72145
72146 if (len > lv)
72147 len = lv;
72148- if (copy_to_user(optval, &v, len))
72149+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72150 return -EFAULT;
72151 lenout:
72152 if (put_user(len, optlen))
72153@@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72154 */
72155 smp_wmb();
72156 atomic_set(&sk->sk_refcnt, 1);
72157- atomic_set(&sk->sk_drops, 0);
72158+ atomic_set_unchecked(&sk->sk_drops, 0);
72159 }
72160 EXPORT_SYMBOL(sock_init_data);
72161
72162diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72163index 02e75d1..9a57a7c 100644
72164--- a/net/decnet/sysctl_net_decnet.c
72165+++ b/net/decnet/sysctl_net_decnet.c
72166@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72167
72168 if (len > *lenp) len = *lenp;
72169
72170- if (copy_to_user(buffer, addr, len))
72171+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72172 return -EFAULT;
72173
72174 *lenp = len;
72175@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72176
72177 if (len > *lenp) len = *lenp;
72178
72179- if (copy_to_user(buffer, devname, len))
72180+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72181 return -EFAULT;
72182
72183 *lenp = len;
72184diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72185index 39a2d29..f39c0fe 100644
72186--- a/net/econet/Kconfig
72187+++ b/net/econet/Kconfig
72188@@ -4,7 +4,7 @@
72189
72190 config ECONET
72191 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72192- depends on EXPERIMENTAL && INET
72193+ depends on EXPERIMENTAL && INET && BROKEN
72194 ---help---
72195 Econet is a fairly old and slow networking protocol mainly used by
72196 Acorn computers to access file and print servers. It uses native
72197diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72198index 92fc5f6..b790d91 100644
72199--- a/net/ipv4/fib_frontend.c
72200+++ b/net/ipv4/fib_frontend.c
72201@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72202 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72203 fib_sync_up(dev);
72204 #endif
72205- atomic_inc(&net->ipv4.dev_addr_genid);
72206+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72207 rt_cache_flush(dev_net(dev), -1);
72208 break;
72209 case NETDEV_DOWN:
72210 fib_del_ifaddr(ifa, NULL);
72211- atomic_inc(&net->ipv4.dev_addr_genid);
72212+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72213 if (ifa->ifa_dev->ifa_list == NULL) {
72214 /* Last address was deleted from this interface.
72215 * Disable IP.
72216@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72217 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72218 fib_sync_up(dev);
72219 #endif
72220- atomic_inc(&net->ipv4.dev_addr_genid);
72221+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72222 rt_cache_flush(dev_net(dev), -1);
72223 break;
72224 case NETDEV_DOWN:
72225diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72226index 80106d8..232e898 100644
72227--- a/net/ipv4/fib_semantics.c
72228+++ b/net/ipv4/fib_semantics.c
72229@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72230 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72231 nh->nh_gw,
72232 nh->nh_parent->fib_scope);
72233- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72234+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72235
72236 return nh->nh_saddr;
72237 }
72238diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72239index ccee270..db23c3c 100644
72240--- a/net/ipv4/inet_diag.c
72241+++ b/net/ipv4/inet_diag.c
72242@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72243 r->idiag_retrans = 0;
72244
72245 r->id.idiag_if = sk->sk_bound_dev_if;
72246+
72247+#ifdef CONFIG_GRKERNSEC_HIDESYM
72248+ r->id.idiag_cookie[0] = 0;
72249+ r->id.idiag_cookie[1] = 0;
72250+#else
72251 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72252 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72253+#endif
72254
72255 r->id.idiag_sport = inet->inet_sport;
72256 r->id.idiag_dport = inet->inet_dport;
72257@@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72258 r->idiag_family = tw->tw_family;
72259 r->idiag_retrans = 0;
72260 r->id.idiag_if = tw->tw_bound_dev_if;
72261+
72262+#ifdef CONFIG_GRKERNSEC_HIDESYM
72263+ r->id.idiag_cookie[0] = 0;
72264+ r->id.idiag_cookie[1] = 0;
72265+#else
72266 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72267 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72268+#endif
72269+
72270 r->id.idiag_sport = tw->tw_sport;
72271 r->id.idiag_dport = tw->tw_dport;
72272 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72273@@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72274 if (sk == NULL)
72275 goto unlock;
72276
72277+#ifndef CONFIG_GRKERNSEC_HIDESYM
72278 err = -ESTALE;
72279 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72280 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72281 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72282 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72283 goto out;
72284+#endif
72285
72286 err = -ENOMEM;
72287 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72288@@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72289 r->idiag_retrans = req->retrans;
72290
72291 r->id.idiag_if = sk->sk_bound_dev_if;
72292+
72293+#ifdef CONFIG_GRKERNSEC_HIDESYM
72294+ r->id.idiag_cookie[0] = 0;
72295+ r->id.idiag_cookie[1] = 0;
72296+#else
72297 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72298 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72299+#endif
72300
72301 tmo = req->expires - jiffies;
72302 if (tmo < 0)
72303diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72304index 984ec65..97ac518 100644
72305--- a/net/ipv4/inet_hashtables.c
72306+++ b/net/ipv4/inet_hashtables.c
72307@@ -18,12 +18,15 @@
72308 #include <linux/sched.h>
72309 #include <linux/slab.h>
72310 #include <linux/wait.h>
72311+#include <linux/security.h>
72312
72313 #include <net/inet_connection_sock.h>
72314 #include <net/inet_hashtables.h>
72315 #include <net/secure_seq.h>
72316 #include <net/ip.h>
72317
72318+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72319+
72320 /*
72321 * Allocate and initialize a new local port bind bucket.
72322 * The bindhash mutex for snum's hash chain must be held here.
72323@@ -530,6 +533,8 @@ ok:
72324 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72325 spin_unlock(&head->lock);
72326
72327+ gr_update_task_in_ip_table(current, inet_sk(sk));
72328+
72329 if (tw) {
72330 inet_twsk_deschedule(tw, death_row);
72331 while (twrefcnt) {
72332diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72333index 86f13c67..59a35b5 100644
72334--- a/net/ipv4/inetpeer.c
72335+++ b/net/ipv4/inetpeer.c
72336@@ -436,8 +436,8 @@ relookup:
72337 if (p) {
72338 p->daddr = *daddr;
72339 atomic_set(&p->refcnt, 1);
72340- atomic_set(&p->rid, 0);
72341- atomic_set(&p->ip_id_count,
72342+ atomic_set_unchecked(&p->rid, 0);
72343+ atomic_set_unchecked(&p->ip_id_count,
72344 (daddr->family == AF_INET) ?
72345 secure_ip_id(daddr->addr.a4) :
72346 secure_ipv6_id(daddr->addr.a6));
72347diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72348index fdaabf2..0ec3205 100644
72349--- a/net/ipv4/ip_fragment.c
72350+++ b/net/ipv4/ip_fragment.c
72351@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72352 return 0;
72353
72354 start = qp->rid;
72355- end = atomic_inc_return(&peer->rid);
72356+ end = atomic_inc_return_unchecked(&peer->rid);
72357 qp->rid = end;
72358
72359 rc = qp->q.fragments && (end - start) > max;
72360diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72361index 09ff51b..d3968eb 100644
72362--- a/net/ipv4/ip_sockglue.c
72363+++ b/net/ipv4/ip_sockglue.c
72364@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72365 len = min_t(unsigned int, len, opt->optlen);
72366 if (put_user(len, optlen))
72367 return -EFAULT;
72368- if (copy_to_user(optval, opt->__data, len))
72369+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72370+ copy_to_user(optval, opt->__data, len))
72371 return -EFAULT;
72372 return 0;
72373 }
72374@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72375 if (sk->sk_type != SOCK_STREAM)
72376 return -ENOPROTOOPT;
72377
72378- msg.msg_control = optval;
72379+ msg.msg_control = (void __force_kernel *)optval;
72380 msg.msg_controllen = len;
72381 msg.msg_flags = flags;
72382
72383diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72384index 99ec116..c5628fe 100644
72385--- a/net/ipv4/ipconfig.c
72386+++ b/net/ipv4/ipconfig.c
72387@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72388
72389 mm_segment_t oldfs = get_fs();
72390 set_fs(get_ds());
72391- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72392+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72393 set_fs(oldfs);
72394 return res;
72395 }
72396@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72397
72398 mm_segment_t oldfs = get_fs();
72399 set_fs(get_ds());
72400- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72401+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72402 set_fs(oldfs);
72403 return res;
72404 }
72405@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
72406
72407 mm_segment_t oldfs = get_fs();
72408 set_fs(get_ds());
72409- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72410+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72411 set_fs(oldfs);
72412 return res;
72413 }
72414diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72415index 2133c30..5c4b40b 100644
72416--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
72417+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72418@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
72419
72420 *len = 0;
72421
72422- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72423+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72424 if (*octets == NULL)
72425 return 0;
72426
72427diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
72428index 43d4c3b..1914409 100644
72429--- a/net/ipv4/ping.c
72430+++ b/net/ipv4/ping.c
72431@@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
72432 sk_rmem_alloc_get(sp),
72433 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72434 atomic_read(&sp->sk_refcnt), sp,
72435- atomic_read(&sp->sk_drops), len);
72436+ atomic_read_unchecked(&sp->sk_drops), len);
72437 }
72438
72439 static int ping_seq_show(struct seq_file *seq, void *v)
72440diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
72441index 007e2eb..85a18a0 100644
72442--- a/net/ipv4/raw.c
72443+++ b/net/ipv4/raw.c
72444@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
72445 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72446 {
72447 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72448- atomic_inc(&sk->sk_drops);
72449+ atomic_inc_unchecked(&sk->sk_drops);
72450 kfree_skb(skb);
72451 return NET_RX_DROP;
72452 }
72453@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
72454
72455 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72456 {
72457+ struct icmp_filter filter;
72458+
72459 if (optlen > sizeof(struct icmp_filter))
72460 optlen = sizeof(struct icmp_filter);
72461- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72462+ if (copy_from_user(&filter, optval, optlen))
72463 return -EFAULT;
72464+ raw_sk(sk)->filter = filter;
72465 return 0;
72466 }
72467
72468 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72469 {
72470 int len, ret = -EFAULT;
72471+ struct icmp_filter filter;
72472
72473 if (get_user(len, optlen))
72474 goto out;
72475@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
72476 if (len > sizeof(struct icmp_filter))
72477 len = sizeof(struct icmp_filter);
72478 ret = -EFAULT;
72479- if (put_user(len, optlen) ||
72480- copy_to_user(optval, &raw_sk(sk)->filter, len))
72481+ filter = raw_sk(sk)->filter;
72482+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72483 goto out;
72484 ret = 0;
72485 out: return ret;
72486@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
72487 sk_wmem_alloc_get(sp),
72488 sk_rmem_alloc_get(sp),
72489 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72490- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72491+ atomic_read(&sp->sk_refcnt),
72492+#ifdef CONFIG_GRKERNSEC_HIDESYM
72493+ NULL,
72494+#else
72495+ sp,
72496+#endif
72497+ atomic_read_unchecked(&sp->sk_drops));
72498 }
72499
72500 static int raw_seq_show(struct seq_file *seq, void *v)
72501diff --git a/net/ipv4/route.c b/net/ipv4/route.c
72502index 94cdbc5..0cb0063 100644
72503--- a/net/ipv4/route.c
72504+++ b/net/ipv4/route.c
72505@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
72506
72507 static inline int rt_genid(struct net *net)
72508 {
72509- return atomic_read(&net->ipv4.rt_genid);
72510+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72511 }
72512
72513 #ifdef CONFIG_PROC_FS
72514@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
72515 unsigned char shuffle;
72516
72517 get_random_bytes(&shuffle, sizeof(shuffle));
72518- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72519+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72520 redirect_genid++;
72521 }
72522
72523@@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
72524 error = rt->dst.error;
72525 if (peer) {
72526 inet_peer_refcheck(rt->peer);
72527- id = atomic_read(&peer->ip_id_count) & 0xffff;
72528+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72529 if (peer->tcp_ts_stamp) {
72530 ts = peer->tcp_ts;
72531 tsage = get_seconds() - peer->tcp_ts_stamp;
72532diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
72533index a9db4b1..3c03301 100644
72534--- a/net/ipv4/tcp_ipv4.c
72535+++ b/net/ipv4/tcp_ipv4.c
72536@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72537 int sysctl_tcp_low_latency __read_mostly;
72538 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72539
72540+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72541+extern int grsec_enable_blackhole;
72542+#endif
72543
72544 #ifdef CONFIG_TCP_MD5SIG
72545 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72546@@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
72547 return 0;
72548
72549 reset:
72550+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72551+ if (!grsec_enable_blackhole)
72552+#endif
72553 tcp_v4_send_reset(rsk, skb);
72554 discard:
72555 kfree_skb(skb);
72556@@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72557 TCP_SKB_CB(skb)->sacked = 0;
72558
72559 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72560- if (!sk)
72561+ if (!sk) {
72562+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72563+ ret = 1;
72564+#endif
72565 goto no_tcp_socket;
72566-
72567+ }
72568 process:
72569- if (sk->sk_state == TCP_TIME_WAIT)
72570+ if (sk->sk_state == TCP_TIME_WAIT) {
72571+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72572+ ret = 2;
72573+#endif
72574 goto do_time_wait;
72575+ }
72576
72577 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72578 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72579@@ -1744,6 +1757,10 @@ no_tcp_socket:
72580 bad_packet:
72581 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72582 } else {
72583+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72584+ if (!grsec_enable_blackhole || (ret == 1 &&
72585+ (skb->dev->flags & IFF_LOOPBACK)))
72586+#endif
72587 tcp_v4_send_reset(NULL, skb);
72588 }
72589
72590@@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
72591 0, /* non standard timer */
72592 0, /* open_requests have no inode */
72593 atomic_read(&sk->sk_refcnt),
72594+#ifdef CONFIG_GRKERNSEC_HIDESYM
72595+ NULL,
72596+#else
72597 req,
72598+#endif
72599 len);
72600 }
72601
72602@@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
72603 sock_i_uid(sk),
72604 icsk->icsk_probes_out,
72605 sock_i_ino(sk),
72606- atomic_read(&sk->sk_refcnt), sk,
72607+ atomic_read(&sk->sk_refcnt),
72608+#ifdef CONFIG_GRKERNSEC_HIDESYM
72609+ NULL,
72610+#else
72611+ sk,
72612+#endif
72613 jiffies_to_clock_t(icsk->icsk_rto),
72614 jiffies_to_clock_t(icsk->icsk_ack.ato),
72615 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72616@@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
72617 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72618 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72619 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72620- atomic_read(&tw->tw_refcnt), tw, len);
72621+ atomic_read(&tw->tw_refcnt),
72622+#ifdef CONFIG_GRKERNSEC_HIDESYM
72623+ NULL,
72624+#else
72625+ tw,
72626+#endif
72627+ len);
72628 }
72629
72630 #define TMPSZ 150
72631diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
72632index 66363b6..b0654a3 100644
72633--- a/net/ipv4/tcp_minisocks.c
72634+++ b/net/ipv4/tcp_minisocks.c
72635@@ -27,6 +27,10 @@
72636 #include <net/inet_common.h>
72637 #include <net/xfrm.h>
72638
72639+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72640+extern int grsec_enable_blackhole;
72641+#endif
72642+
72643 int sysctl_tcp_syncookies __read_mostly = 1;
72644 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72645
72646@@ -751,6 +755,10 @@ listen_overflow:
72647
72648 embryonic_reset:
72649 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72650+
72651+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72652+ if (!grsec_enable_blackhole)
72653+#endif
72654 if (!(flg & TCP_FLAG_RST))
72655 req->rsk_ops->send_reset(sk, skb);
72656
72657diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
72658index 85ee7eb..53277ab 100644
72659--- a/net/ipv4/tcp_probe.c
72660+++ b/net/ipv4/tcp_probe.c
72661@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
72662 if (cnt + width >= len)
72663 break;
72664
72665- if (copy_to_user(buf + cnt, tbuf, width))
72666+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72667 return -EFAULT;
72668 cnt += width;
72669 }
72670diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
72671index 2e0f0af..e2948bf 100644
72672--- a/net/ipv4/tcp_timer.c
72673+++ b/net/ipv4/tcp_timer.c
72674@@ -22,6 +22,10 @@
72675 #include <linux/gfp.h>
72676 #include <net/tcp.h>
72677
72678+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72679+extern int grsec_lastack_retries;
72680+#endif
72681+
72682 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72683 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72684 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72685@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
72686 }
72687 }
72688
72689+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72690+ if ((sk->sk_state == TCP_LAST_ACK) &&
72691+ (grsec_lastack_retries > 0) &&
72692+ (grsec_lastack_retries < retry_until))
72693+ retry_until = grsec_lastack_retries;
72694+#endif
72695+
72696 if (retransmits_timed_out(sk, retry_until,
72697 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72698 /* Has it gone just too far? */
72699diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
72700index 5a65eea..bd913a1 100644
72701--- a/net/ipv4/udp.c
72702+++ b/net/ipv4/udp.c
72703@@ -86,6 +86,7 @@
72704 #include <linux/types.h>
72705 #include <linux/fcntl.h>
72706 #include <linux/module.h>
72707+#include <linux/security.h>
72708 #include <linux/socket.h>
72709 #include <linux/sockios.h>
72710 #include <linux/igmp.h>
72711@@ -108,6 +109,10 @@
72712 #include <trace/events/udp.h>
72713 #include "udp_impl.h"
72714
72715+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72716+extern int grsec_enable_blackhole;
72717+#endif
72718+
72719 struct udp_table udp_table __read_mostly;
72720 EXPORT_SYMBOL(udp_table);
72721
72722@@ -565,6 +570,9 @@ found:
72723 return s;
72724 }
72725
72726+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72727+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72728+
72729 /*
72730 * This routine is called by the ICMP module when it gets some
72731 * sort of error condition. If err < 0 then the socket should
72732@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
72733 dport = usin->sin_port;
72734 if (dport == 0)
72735 return -EINVAL;
72736+
72737+ err = gr_search_udp_sendmsg(sk, usin);
72738+ if (err)
72739+ return err;
72740 } else {
72741 if (sk->sk_state != TCP_ESTABLISHED)
72742 return -EDESTADDRREQ;
72743+
72744+ err = gr_search_udp_sendmsg(sk, NULL);
72745+ if (err)
72746+ return err;
72747+
72748 daddr = inet->inet_daddr;
72749 dport = inet->inet_dport;
72750 /* Open fast path for connected socket.
72751@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
72752 udp_lib_checksum_complete(skb)) {
72753 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72754 IS_UDPLITE(sk));
72755- atomic_inc(&sk->sk_drops);
72756+ atomic_inc_unchecked(&sk->sk_drops);
72757 __skb_unlink(skb, rcvq);
72758 __skb_queue_tail(&list_kill, skb);
72759 }
72760@@ -1185,6 +1202,10 @@ try_again:
72761 if (!skb)
72762 goto out;
72763
72764+ err = gr_search_udp_recvmsg(sk, skb);
72765+ if (err)
72766+ goto out_free;
72767+
72768 ulen = skb->len - sizeof(struct udphdr);
72769 copied = len;
72770 if (copied > ulen)
72771@@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72772
72773 drop:
72774 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72775- atomic_inc(&sk->sk_drops);
72776+ atomic_inc_unchecked(&sk->sk_drops);
72777 kfree_skb(skb);
72778 return -1;
72779 }
72780@@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
72781 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72782
72783 if (!skb1) {
72784- atomic_inc(&sk->sk_drops);
72785+ atomic_inc_unchecked(&sk->sk_drops);
72786 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72787 IS_UDPLITE(sk));
72788 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72789@@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
72790 goto csum_error;
72791
72792 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72793+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72794+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72795+#endif
72796 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72797
72798 /*
72799@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
72800 sk_wmem_alloc_get(sp),
72801 sk_rmem_alloc_get(sp),
72802 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72803- atomic_read(&sp->sk_refcnt), sp,
72804- atomic_read(&sp->sk_drops), len);
72805+ atomic_read(&sp->sk_refcnt),
72806+#ifdef CONFIG_GRKERNSEC_HIDESYM
72807+ NULL,
72808+#else
72809+ sp,
72810+#endif
72811+ atomic_read_unchecked(&sp->sk_drops), len);
72812 }
72813
72814 int udp4_seq_show(struct seq_file *seq, void *v)
72815diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
72816index 36806de..b86f74c 100644
72817--- a/net/ipv6/addrconf.c
72818+++ b/net/ipv6/addrconf.c
72819@@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
72820 p.iph.ihl = 5;
72821 p.iph.protocol = IPPROTO_IPV6;
72822 p.iph.ttl = 64;
72823- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
72824+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
72825
72826 if (ops->ndo_do_ioctl) {
72827 mm_segment_t oldfs = get_fs();
72828diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
72829index 1567fb1..29af910 100644
72830--- a/net/ipv6/inet6_connection_sock.c
72831+++ b/net/ipv6/inet6_connection_sock.c
72832@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
72833 #ifdef CONFIG_XFRM
72834 {
72835 struct rt6_info *rt = (struct rt6_info *)dst;
72836- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72837+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72838 }
72839 #endif
72840 }
72841@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
72842 #ifdef CONFIG_XFRM
72843 if (dst) {
72844 struct rt6_info *rt = (struct rt6_info *)dst;
72845- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72846+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72847 __sk_dst_reset(sk);
72848 dst = NULL;
72849 }
72850diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
72851index 26cb08c..8af9877 100644
72852--- a/net/ipv6/ipv6_sockglue.c
72853+++ b/net/ipv6/ipv6_sockglue.c
72854@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
72855 if (sk->sk_type != SOCK_STREAM)
72856 return -ENOPROTOOPT;
72857
72858- msg.msg_control = optval;
72859+ msg.msg_control = (void __force_kernel *)optval;
72860 msg.msg_controllen = len;
72861 msg.msg_flags = flags;
72862
72863diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
72864index 331af3b..7789844 100644
72865--- a/net/ipv6/raw.c
72866+++ b/net/ipv6/raw.c
72867@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
72868 {
72869 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
72870 skb_checksum_complete(skb)) {
72871- atomic_inc(&sk->sk_drops);
72872+ atomic_inc_unchecked(&sk->sk_drops);
72873 kfree_skb(skb);
72874 return NET_RX_DROP;
72875 }
72876@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
72877 struct raw6_sock *rp = raw6_sk(sk);
72878
72879 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72880- atomic_inc(&sk->sk_drops);
72881+ atomic_inc_unchecked(&sk->sk_drops);
72882 kfree_skb(skb);
72883 return NET_RX_DROP;
72884 }
72885@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
72886
72887 if (inet->hdrincl) {
72888 if (skb_checksum_complete(skb)) {
72889- atomic_inc(&sk->sk_drops);
72890+ atomic_inc_unchecked(&sk->sk_drops);
72891 kfree_skb(skb);
72892 return NET_RX_DROP;
72893 }
72894@@ -601,7 +601,7 @@ out:
72895 return err;
72896 }
72897
72898-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72899+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72900 struct flowi6 *fl6, struct dst_entry **dstp,
72901 unsigned int flags)
72902 {
72903@@ -909,12 +909,15 @@ do_confirm:
72904 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72905 char __user *optval, int optlen)
72906 {
72907+ struct icmp6_filter filter;
72908+
72909 switch (optname) {
72910 case ICMPV6_FILTER:
72911 if (optlen > sizeof(struct icmp6_filter))
72912 optlen = sizeof(struct icmp6_filter);
72913- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72914+ if (copy_from_user(&filter, optval, optlen))
72915 return -EFAULT;
72916+ raw6_sk(sk)->filter = filter;
72917 return 0;
72918 default:
72919 return -ENOPROTOOPT;
72920@@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
72921 char __user *optval, int __user *optlen)
72922 {
72923 int len;
72924+ struct icmp6_filter filter;
72925
72926 switch (optname) {
72927 case ICMPV6_FILTER:
72928@@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
72929 len = sizeof(struct icmp6_filter);
72930 if (put_user(len, optlen))
72931 return -EFAULT;
72932- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72933+ filter = raw6_sk(sk)->filter;
72934+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72935 return -EFAULT;
72936 return 0;
72937 default:
72938@@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
72939 0, 0L, 0,
72940 sock_i_uid(sp), 0,
72941 sock_i_ino(sp),
72942- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72943+ atomic_read(&sp->sk_refcnt),
72944+#ifdef CONFIG_GRKERNSEC_HIDESYM
72945+ NULL,
72946+#else
72947+ sp,
72948+#endif
72949+ atomic_read_unchecked(&sp->sk_drops));
72950 }
72951
72952 static int raw6_seq_show(struct seq_file *seq, void *v)
72953diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
72954index 2dea4bb..dca8ac5 100644
72955--- a/net/ipv6/tcp_ipv6.c
72956+++ b/net/ipv6/tcp_ipv6.c
72957@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
72958 }
72959 #endif
72960
72961+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72962+extern int grsec_enable_blackhole;
72963+#endif
72964+
72965 static void tcp_v6_hash(struct sock *sk)
72966 {
72967 if (sk->sk_state != TCP_CLOSE) {
72968@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
72969 return 0;
72970
72971 reset:
72972+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72973+ if (!grsec_enable_blackhole)
72974+#endif
72975 tcp_v6_send_reset(sk, skb);
72976 discard:
72977 if (opt_skb)
72978@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
72979 TCP_SKB_CB(skb)->sacked = 0;
72980
72981 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72982- if (!sk)
72983+ if (!sk) {
72984+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72985+ ret = 1;
72986+#endif
72987 goto no_tcp_socket;
72988+ }
72989
72990 process:
72991- if (sk->sk_state == TCP_TIME_WAIT)
72992+ if (sk->sk_state == TCP_TIME_WAIT) {
72993+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72994+ ret = 2;
72995+#endif
72996 goto do_time_wait;
72997+ }
72998
72999 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73000 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73001@@ -1783,6 +1798,10 @@ no_tcp_socket:
73002 bad_packet:
73003 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73004 } else {
73005+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73006+ if (!grsec_enable_blackhole || (ret == 1 &&
73007+ (skb->dev->flags & IFF_LOOPBACK)))
73008+#endif
73009 tcp_v6_send_reset(NULL, skb);
73010 }
73011
73012@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73013 uid,
73014 0, /* non standard timer */
73015 0, /* open_requests have no inode */
73016- 0, req);
73017+ 0,
73018+#ifdef CONFIG_GRKERNSEC_HIDESYM
73019+ NULL
73020+#else
73021+ req
73022+#endif
73023+ );
73024 }
73025
73026 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73027@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73028 sock_i_uid(sp),
73029 icsk->icsk_probes_out,
73030 sock_i_ino(sp),
73031- atomic_read(&sp->sk_refcnt), sp,
73032+ atomic_read(&sp->sk_refcnt),
73033+#ifdef CONFIG_GRKERNSEC_HIDESYM
73034+ NULL,
73035+#else
73036+ sp,
73037+#endif
73038 jiffies_to_clock_t(icsk->icsk_rto),
73039 jiffies_to_clock_t(icsk->icsk_ack.ato),
73040 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73041@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73042 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73043 tw->tw_substate, 0, 0,
73044 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73045- atomic_read(&tw->tw_refcnt), tw);
73046+ atomic_read(&tw->tw_refcnt),
73047+#ifdef CONFIG_GRKERNSEC_HIDESYM
73048+ NULL
73049+#else
73050+ tw
73051+#endif
73052+ );
73053 }
73054
73055 static int tcp6_seq_show(struct seq_file *seq, void *v)
73056diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73057index 8c25419..47a51ae 100644
73058--- a/net/ipv6/udp.c
73059+++ b/net/ipv6/udp.c
73060@@ -50,6 +50,10 @@
73061 #include <linux/seq_file.h>
73062 #include "udp_impl.h"
73063
73064+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73065+extern int grsec_enable_blackhole;
73066+#endif
73067+
73068 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73069 {
73070 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73071@@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73072
73073 return 0;
73074 drop:
73075- atomic_inc(&sk->sk_drops);
73076+ atomic_inc_unchecked(&sk->sk_drops);
73077 drop_no_sk_drops_inc:
73078 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73079 kfree_skb(skb);
73080@@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73081 continue;
73082 }
73083 drop:
73084- atomic_inc(&sk->sk_drops);
73085+ atomic_inc_unchecked(&sk->sk_drops);
73086 UDP6_INC_STATS_BH(sock_net(sk),
73087 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73088 UDP6_INC_STATS_BH(sock_net(sk),
73089@@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73090 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73091 proto == IPPROTO_UDPLITE);
73092
73093+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73094+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73095+#endif
73096 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73097
73098 kfree_skb(skb);
73099@@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73100 if (!sock_owned_by_user(sk))
73101 udpv6_queue_rcv_skb(sk, skb);
73102 else if (sk_add_backlog(sk, skb)) {
73103- atomic_inc(&sk->sk_drops);
73104+ atomic_inc_unchecked(&sk->sk_drops);
73105 bh_unlock_sock(sk);
73106 sock_put(sk);
73107 goto discard;
73108@@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73109 0, 0L, 0,
73110 sock_i_uid(sp), 0,
73111 sock_i_ino(sp),
73112- atomic_read(&sp->sk_refcnt), sp,
73113- atomic_read(&sp->sk_drops));
73114+ atomic_read(&sp->sk_refcnt),
73115+#ifdef CONFIG_GRKERNSEC_HIDESYM
73116+ NULL,
73117+#else
73118+ sp,
73119+#endif
73120+ atomic_read_unchecked(&sp->sk_drops));
73121 }
73122
73123 int udp6_seq_show(struct seq_file *seq, void *v)
73124diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73125index 253695d..9481ce8 100644
73126--- a/net/irda/ircomm/ircomm_tty.c
73127+++ b/net/irda/ircomm/ircomm_tty.c
73128@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73129 add_wait_queue(&self->open_wait, &wait);
73130
73131 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73132- __FILE__,__LINE__, tty->driver->name, self->open_count );
73133+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73134
73135 /* As far as I can see, we protect open_count - Jean II */
73136 spin_lock_irqsave(&self->spinlock, flags);
73137 if (!tty_hung_up_p(filp)) {
73138 extra_count = 1;
73139- self->open_count--;
73140+ local_dec(&self->open_count);
73141 }
73142 spin_unlock_irqrestore(&self->spinlock, flags);
73143- self->blocked_open++;
73144+ local_inc(&self->blocked_open);
73145
73146 while (1) {
73147 if (tty->termios->c_cflag & CBAUD) {
73148@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73149 }
73150
73151 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73152- __FILE__,__LINE__, tty->driver->name, self->open_count );
73153+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73154
73155 schedule();
73156 }
73157@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73158 if (extra_count) {
73159 /* ++ is not atomic, so this should be protected - Jean II */
73160 spin_lock_irqsave(&self->spinlock, flags);
73161- self->open_count++;
73162+ local_inc(&self->open_count);
73163 spin_unlock_irqrestore(&self->spinlock, flags);
73164 }
73165- self->blocked_open--;
73166+ local_dec(&self->blocked_open);
73167
73168 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73169- __FILE__,__LINE__, tty->driver->name, self->open_count);
73170+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73171
73172 if (!retval)
73173 self->flags |= ASYNC_NORMAL_ACTIVE;
73174@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73175 }
73176 /* ++ is not atomic, so this should be protected - Jean II */
73177 spin_lock_irqsave(&self->spinlock, flags);
73178- self->open_count++;
73179+ local_inc(&self->open_count);
73180
73181 tty->driver_data = self;
73182 self->tty = tty;
73183 spin_unlock_irqrestore(&self->spinlock, flags);
73184
73185 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73186- self->line, self->open_count);
73187+ self->line, local_read(&self->open_count));
73188
73189 /* Not really used by us, but lets do it anyway */
73190 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73191@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73192 return;
73193 }
73194
73195- if ((tty->count == 1) && (self->open_count != 1)) {
73196+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73197 /*
73198 * Uh, oh. tty->count is 1, which means that the tty
73199 * structure will be freed. state->count should always
73200@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73201 */
73202 IRDA_DEBUG(0, "%s(), bad serial port count; "
73203 "tty->count is 1, state->count is %d\n", __func__ ,
73204- self->open_count);
73205- self->open_count = 1;
73206+ local_read(&self->open_count));
73207+ local_set(&self->open_count, 1);
73208 }
73209
73210- if (--self->open_count < 0) {
73211+ if (local_dec_return(&self->open_count) < 0) {
73212 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73213- __func__, self->line, self->open_count);
73214- self->open_count = 0;
73215+ __func__, self->line, local_read(&self->open_count));
73216+ local_set(&self->open_count, 0);
73217 }
73218- if (self->open_count) {
73219+ if (local_read(&self->open_count)) {
73220 spin_unlock_irqrestore(&self->spinlock, flags);
73221
73222 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73223@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73224 tty->closing = 0;
73225 self->tty = NULL;
73226
73227- if (self->blocked_open) {
73228+ if (local_read(&self->blocked_open)) {
73229 if (self->close_delay)
73230 schedule_timeout_interruptible(self->close_delay);
73231 wake_up_interruptible(&self->open_wait);
73232@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73233 spin_lock_irqsave(&self->spinlock, flags);
73234 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73235 self->tty = NULL;
73236- self->open_count = 0;
73237+ local_set(&self->open_count, 0);
73238 spin_unlock_irqrestore(&self->spinlock, flags);
73239
73240 wake_up_interruptible(&self->open_wait);
73241@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73242 seq_putc(m, '\n');
73243
73244 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73245- seq_printf(m, "Open count: %d\n", self->open_count);
73246+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73247 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73248 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73249
73250diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73251index 274d150..656a144 100644
73252--- a/net/iucv/af_iucv.c
73253+++ b/net/iucv/af_iucv.c
73254@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73255
73256 write_lock_bh(&iucv_sk_list.lock);
73257
73258- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73259+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73260 while (__iucv_get_sock_by_name(name)) {
73261 sprintf(name, "%08x",
73262- atomic_inc_return(&iucv_sk_list.autobind_name));
73263+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73264 }
73265
73266 write_unlock_bh(&iucv_sk_list.lock);
73267diff --git a/net/key/af_key.c b/net/key/af_key.c
73268index 1e733e9..3d73c9f 100644
73269--- a/net/key/af_key.c
73270+++ b/net/key/af_key.c
73271@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73272 static u32 get_acqseq(void)
73273 {
73274 u32 res;
73275- static atomic_t acqseq;
73276+ static atomic_unchecked_t acqseq;
73277
73278 do {
73279- res = atomic_inc_return(&acqseq);
73280+ res = atomic_inc_return_unchecked(&acqseq);
73281 } while (!res);
73282 return res;
73283 }
73284diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73285index 73495f1..ad51356 100644
73286--- a/net/mac80211/ieee80211_i.h
73287+++ b/net/mac80211/ieee80211_i.h
73288@@ -27,6 +27,7 @@
73289 #include <net/ieee80211_radiotap.h>
73290 #include <net/cfg80211.h>
73291 #include <net/mac80211.h>
73292+#include <asm/local.h>
73293 #include "key.h"
73294 #include "sta_info.h"
73295
73296@@ -764,7 +765,7 @@ struct ieee80211_local {
73297 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73298 spinlock_t queue_stop_reason_lock;
73299
73300- int open_count;
73301+ local_t open_count;
73302 int monitors, cooked_mntrs;
73303 /* number of interfaces with corresponding FIF_ flags */
73304 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73305diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73306index 30d7355..e260095 100644
73307--- a/net/mac80211/iface.c
73308+++ b/net/mac80211/iface.c
73309@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73310 break;
73311 }
73312
73313- if (local->open_count == 0) {
73314+ if (local_read(&local->open_count) == 0) {
73315 res = drv_start(local);
73316 if (res)
73317 goto err_del_bss;
73318@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73319 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73320
73321 if (!is_valid_ether_addr(dev->dev_addr)) {
73322- if (!local->open_count)
73323+ if (!local_read(&local->open_count))
73324 drv_stop(local);
73325 return -EADDRNOTAVAIL;
73326 }
73327@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73328 mutex_unlock(&local->mtx);
73329
73330 if (coming_up)
73331- local->open_count++;
73332+ local_inc(&local->open_count);
73333
73334 if (hw_reconf_flags) {
73335 ieee80211_hw_config(local, hw_reconf_flags);
73336@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73337 err_del_interface:
73338 drv_remove_interface(local, &sdata->vif);
73339 err_stop:
73340- if (!local->open_count)
73341+ if (!local_read(&local->open_count))
73342 drv_stop(local);
73343 err_del_bss:
73344 sdata->bss = NULL;
73345@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73346 }
73347
73348 if (going_down)
73349- local->open_count--;
73350+ local_dec(&local->open_count);
73351
73352 switch (sdata->vif.type) {
73353 case NL80211_IFTYPE_AP_VLAN:
73354@@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73355
73356 ieee80211_recalc_ps(local, -1);
73357
73358- if (local->open_count == 0) {
73359+ if (local_read(&local->open_count) == 0) {
73360 if (local->ops->napi_poll)
73361 napi_disable(&local->napi);
73362 ieee80211_clear_tx_pending(local);
73363diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73364index a7536fd..4039cc0 100644
73365--- a/net/mac80211/main.c
73366+++ b/net/mac80211/main.c
73367@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73368 local->hw.conf.power_level = power;
73369 }
73370
73371- if (changed && local->open_count) {
73372+ if (changed && local_read(&local->open_count)) {
73373 ret = drv_config(local, changed);
73374 /*
73375 * Goal:
73376diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73377index 9ee7164..56c5061 100644
73378--- a/net/mac80211/pm.c
73379+++ b/net/mac80211/pm.c
73380@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73381 struct ieee80211_sub_if_data *sdata;
73382 struct sta_info *sta;
73383
73384- if (!local->open_count)
73385+ if (!local_read(&local->open_count))
73386 goto suspend;
73387
73388 ieee80211_scan_cancel(local);
73389@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73390 cancel_work_sync(&local->dynamic_ps_enable_work);
73391 del_timer_sync(&local->dynamic_ps_timer);
73392
73393- local->wowlan = wowlan && local->open_count;
73394+ local->wowlan = wowlan && local_read(&local->open_count);
73395 if (local->wowlan) {
73396 int err = drv_suspend(local, wowlan);
73397 if (err < 0) {
73398@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73399 }
73400
73401 /* stop hardware - this must stop RX */
73402- if (local->open_count)
73403+ if (local_read(&local->open_count))
73404 ieee80211_stop_device(local);
73405
73406 suspend:
73407diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
73408index 5a5a776..9600b11 100644
73409--- a/net/mac80211/rate.c
73410+++ b/net/mac80211/rate.c
73411@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
73412
73413 ASSERT_RTNL();
73414
73415- if (local->open_count)
73416+ if (local_read(&local->open_count))
73417 return -EBUSY;
73418
73419 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73420diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
73421index c97a065..ff61928 100644
73422--- a/net/mac80211/rc80211_pid_debugfs.c
73423+++ b/net/mac80211/rc80211_pid_debugfs.c
73424@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
73425
73426 spin_unlock_irqrestore(&events->lock, status);
73427
73428- if (copy_to_user(buf, pb, p))
73429+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73430 return -EFAULT;
73431
73432 return p;
73433diff --git a/net/mac80211/util.c b/net/mac80211/util.c
73434index d5230ec..c604b21 100644
73435--- a/net/mac80211/util.c
73436+++ b/net/mac80211/util.c
73437@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
73438 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73439
73440 /* everything else happens only if HW was up & running */
73441- if (!local->open_count)
73442+ if (!local_read(&local->open_count))
73443 goto wake_up;
73444
73445 /*
73446diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
73447index d5597b7..ab6d39c 100644
73448--- a/net/netfilter/Kconfig
73449+++ b/net/netfilter/Kconfig
73450@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
73451
73452 To compile it as a module, choose M here. If unsure, say N.
73453
73454+config NETFILTER_XT_MATCH_GRADM
73455+ tristate '"gradm" match support'
73456+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73457+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73458+ ---help---
73459+ The gradm match allows to match on grsecurity RBAC being enabled.
73460+ It is useful when iptables rules are applied early on bootup to
73461+ prevent connections to the machine (except from a trusted host)
73462+ while the RBAC system is disabled.
73463+
73464 config NETFILTER_XT_MATCH_HASHLIMIT
73465 tristate '"hashlimit" match support'
73466 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73467diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
73468index 1a02853..5d8c22e 100644
73469--- a/net/netfilter/Makefile
73470+++ b/net/netfilter/Makefile
73471@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73472 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73473 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73474 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73475+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73476 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73477 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73478 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73479diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
73480index 29fa5ba..8debc79 100644
73481--- a/net/netfilter/ipvs/ip_vs_conn.c
73482+++ b/net/netfilter/ipvs/ip_vs_conn.c
73483@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
73484 /* Increase the refcnt counter of the dest */
73485 atomic_inc(&dest->refcnt);
73486
73487- conn_flags = atomic_read(&dest->conn_flags);
73488+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73489 if (cp->protocol != IPPROTO_UDP)
73490 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73491 /* Bind with the destination and its corresponding transmitter */
73492@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
73493 atomic_set(&cp->refcnt, 1);
73494
73495 atomic_set(&cp->n_control, 0);
73496- atomic_set(&cp->in_pkts, 0);
73497+ atomic_set_unchecked(&cp->in_pkts, 0);
73498
73499 atomic_inc(&ipvs->conn_count);
73500 if (flags & IP_VS_CONN_F_NO_CPORT)
73501@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
73502
73503 /* Don't drop the entry if its number of incoming packets is not
73504 located in [0, 8] */
73505- i = atomic_read(&cp->in_pkts);
73506+ i = atomic_read_unchecked(&cp->in_pkts);
73507 if (i > 8 || i < 0) return 0;
73508
73509 if (!todrop_rate[i]) return 0;
73510diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
73511index 093cc32..9209ae1 100644
73512--- a/net/netfilter/ipvs/ip_vs_core.c
73513+++ b/net/netfilter/ipvs/ip_vs_core.c
73514@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
73515 ret = cp->packet_xmit(skb, cp, pd->pp);
73516 /* do not touch skb anymore */
73517
73518- atomic_inc(&cp->in_pkts);
73519+ atomic_inc_unchecked(&cp->in_pkts);
73520 ip_vs_conn_put(cp);
73521 return ret;
73522 }
73523@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
73524 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73525 pkts = sysctl_sync_threshold(ipvs);
73526 else
73527- pkts = atomic_add_return(1, &cp->in_pkts);
73528+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73529
73530 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73531 cp->protocol == IPPROTO_SCTP) {
73532diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
73533index e1a66cf..0910076 100644
73534--- a/net/netfilter/ipvs/ip_vs_ctl.c
73535+++ b/net/netfilter/ipvs/ip_vs_ctl.c
73536@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
73537 ip_vs_rs_hash(ipvs, dest);
73538 write_unlock_bh(&ipvs->rs_lock);
73539 }
73540- atomic_set(&dest->conn_flags, conn_flags);
73541+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73542
73543 /* bind the service */
73544 if (!dest->svc) {
73545@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
73546 " %-7s %-6d %-10d %-10d\n",
73547 &dest->addr.in6,
73548 ntohs(dest->port),
73549- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73550+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73551 atomic_read(&dest->weight),
73552 atomic_read(&dest->activeconns),
73553 atomic_read(&dest->inactconns));
73554@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
73555 "%-7s %-6d %-10d %-10d\n",
73556 ntohl(dest->addr.ip),
73557 ntohs(dest->port),
73558- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73559+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73560 atomic_read(&dest->weight),
73561 atomic_read(&dest->activeconns),
73562 atomic_read(&dest->inactconns));
73563@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
73564
73565 entry.addr = dest->addr.ip;
73566 entry.port = dest->port;
73567- entry.conn_flags = atomic_read(&dest->conn_flags);
73568+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73569 entry.weight = atomic_read(&dest->weight);
73570 entry.u_threshold = dest->u_threshold;
73571 entry.l_threshold = dest->l_threshold;
73572@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
73573 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73574
73575 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73576- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73577+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73578 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73579 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73580 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73581diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
73582index 2b6678c0..aaa41fc 100644
73583--- a/net/netfilter/ipvs/ip_vs_sync.c
73584+++ b/net/netfilter/ipvs/ip_vs_sync.c
73585@@ -649,7 +649,7 @@ control:
73586 * i.e only increment in_pkts for Templates.
73587 */
73588 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73589- int pkts = atomic_add_return(1, &cp->in_pkts);
73590+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73591
73592 if (pkts % sysctl_sync_period(ipvs) != 1)
73593 return;
73594@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
73595
73596 if (opt)
73597 memcpy(&cp->in_seq, opt, sizeof(*opt));
73598- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73599+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73600 cp->state = state;
73601 cp->old_state = cp->state;
73602 /*
73603diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
73604index aa2d720..d8aa111 100644
73605--- a/net/netfilter/ipvs/ip_vs_xmit.c
73606+++ b/net/netfilter/ipvs/ip_vs_xmit.c
73607@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
73608 else
73609 rc = NF_ACCEPT;
73610 /* do not touch skb anymore */
73611- atomic_inc(&cp->in_pkts);
73612+ atomic_inc_unchecked(&cp->in_pkts);
73613 goto out;
73614 }
73615
73616@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
73617 else
73618 rc = NF_ACCEPT;
73619 /* do not touch skb anymore */
73620- atomic_inc(&cp->in_pkts);
73621+ atomic_inc_unchecked(&cp->in_pkts);
73622 goto out;
73623 }
73624
73625diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
73626index 66b2c54..c7884e3 100644
73627--- a/net/netfilter/nfnetlink_log.c
73628+++ b/net/netfilter/nfnetlink_log.c
73629@@ -70,7 +70,7 @@ struct nfulnl_instance {
73630 };
73631
73632 static DEFINE_SPINLOCK(instances_lock);
73633-static atomic_t global_seq;
73634+static atomic_unchecked_t global_seq;
73635
73636 #define INSTANCE_BUCKETS 16
73637 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73638@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
73639 /* global sequence number */
73640 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73641 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73642- htonl(atomic_inc_return(&global_seq)));
73643+ htonl(atomic_inc_return_unchecked(&global_seq)));
73644
73645 if (data_len) {
73646 struct nlattr *nla;
73647diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
73648new file mode 100644
73649index 0000000..6905327
73650--- /dev/null
73651+++ b/net/netfilter/xt_gradm.c
73652@@ -0,0 +1,51 @@
73653+/*
73654+ * gradm match for netfilter
73655