]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-2.6.32.49-201112082138.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.49-201112082138.patch
CommitLineData
dd203dd8
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index a19b0e8..f773d59 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,42 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242+endif
243+ifdef CONFIG_CHECKER_PLUGIN
244+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246+endif
247+endif
248+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250+gcc-plugins:
251+ $(Q)$(MAKE) $(build)=tools/gcc
252+else
253+gcc-plugins:
254+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
255+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
256+else
257+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
258+endif
259+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
260+endif
261+endif
262+
263 include $(srctree)/arch/$(SRCARCH)/Makefile
264
265 ifneq ($(CONFIG_FRAME_WARN),0)
266@@ -647,7 +684,7 @@ export mod_strip_cmd
267
268
269 ifeq ($(KBUILD_EXTMOD),)
270-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
271+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
272
273 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
274 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
275@@ -868,6 +905,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
276
277 # The actual objects are generated when descending,
278 # make sure no implicit rule kicks in
279+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
280 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
281
282 # Handle descending into subdirectories listed in $(vmlinux-dirs)
283@@ -877,7 +915,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
284 # Error messages still appears in the original language
285
286 PHONY += $(vmlinux-dirs)
287-$(vmlinux-dirs): prepare scripts
288+$(vmlinux-dirs): gcc-plugins prepare scripts
289 $(Q)$(MAKE) $(build)=$@
290
291 # Build the kernel release string
292@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
293 $(Q)$(MAKE) $(build)=. missing-syscalls
294
295 # All the preparing..
296+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
297 prepare: prepare0
298
299 # The asm symlink changes when $(ARCH) changes.
300@@ -1127,6 +1166,7 @@ all: modules
301 # using awk while concatenating to the final file.
302
303 PHONY += modules
304+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
305 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
306 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
307 @$(kecho) ' Building modules, stage 2.';
308@@ -1136,7 +1176,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
309
310 # Target to prepare building external modules
311 PHONY += modules_prepare
312-modules_prepare: prepare scripts
313+modules_prepare: gcc-plugins prepare scripts
314
315 # Target to install modules
316 PHONY += modules_install
317@@ -1201,7 +1241,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
318 include/linux/autoconf.h include/linux/version.h \
319 include/linux/utsrelease.h \
320 include/linux/bounds.h include/asm*/asm-offsets.h \
321- Module.symvers Module.markers tags TAGS cscope*
322+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
323
324 # clean - Delete most, but leave enough to build external modules
325 #
326@@ -1245,7 +1285,7 @@ distclean: mrproper
327 @find $(srctree) $(RCS_FIND_IGNORE) \
328 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
329 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
330- -o -name '.*.rej' -o -size 0 \
331+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
332 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
333 -type f -print | xargs rm -f
334
335@@ -1292,6 +1332,7 @@ help:
336 @echo ' modules_prepare - Set up for building external modules'
337 @echo ' tags/TAGS - Generate tags file for editors'
338 @echo ' cscope - Generate cscope index'
339+ @echo ' gtags - Generate GNU GLOBAL index'
340 @echo ' kernelrelease - Output the release version string'
341 @echo ' kernelversion - Output the version stored in Makefile'
342 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
343@@ -1393,6 +1434,7 @@ PHONY += $(module-dirs) modules
344 $(module-dirs): crmodverdir $(objtree)/Module.symvers
345 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
346
347+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
348 modules: $(module-dirs)
349 @$(kecho) ' Building modules, stage 2.';
350 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
351@@ -1448,7 +1490,7 @@ endif # KBUILD_EXTMOD
352 quiet_cmd_tags = GEN $@
353 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
354
355-tags TAGS cscope: FORCE
356+tags TAGS cscope gtags: FORCE
357 $(call cmd,tags)
358
359 # Scripts to check various things for consistency
360@@ -1513,17 +1555,19 @@ else
361 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
362 endif
363
364-%.s: %.c prepare scripts FORCE
365+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
366+%.s: %.c gcc-plugins prepare scripts FORCE
367 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
368 %.i: %.c prepare scripts FORCE
369 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
370-%.o: %.c prepare scripts FORCE
371+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
372+%.o: %.c gcc-plugins prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 %.lst: %.c prepare scripts FORCE
375 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
376-%.s: %.S prepare scripts FORCE
377+%.s: %.S gcc-plugins prepare scripts FORCE
378 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
379-%.o: %.S prepare scripts FORCE
380+%.o: %.S gcc-plugins prepare scripts FORCE
381 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
382 %.symtypes: %.c prepare scripts FORCE
383 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
384@@ -1533,11 +1577,13 @@ endif
385 $(cmd_crmodverdir)
386 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
387 $(build)=$(build-dir)
388-%/: prepare scripts FORCE
389+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
390+%/: gcc-plugins prepare scripts FORCE
391 $(cmd_crmodverdir)
392 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
393 $(build)=$(build-dir)
394-%.ko: prepare scripts FORCE
395+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
396+%.ko: gcc-plugins prepare scripts FORCE
397 $(cmd_crmodverdir)
398 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
399 $(build)=$(build-dir) $(@:.ko=.o)
400diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
401index 5c75c1b..c82f878 100644
402--- a/arch/alpha/include/asm/elf.h
403+++ b/arch/alpha/include/asm/elf.h
404@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
405
406 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
407
408+#ifdef CONFIG_PAX_ASLR
409+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
410+
411+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
412+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
413+#endif
414+
415 /* $0 is set by ld.so to a pointer to a function which might be
416 registered using atexit. This provides a mean for the dynamic
417 linker to call DT_FINI functions for shared libraries that have
418diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
419index 3f0c59f..cf1e100 100644
420--- a/arch/alpha/include/asm/pgtable.h
421+++ b/arch/alpha/include/asm/pgtable.h
422@@ -101,6 +101,17 @@ struct vm_area_struct;
423 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
424 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
425 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
426+
427+#ifdef CONFIG_PAX_PAGEEXEC
428+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
429+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
430+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
431+#else
432+# define PAGE_SHARED_NOEXEC PAGE_SHARED
433+# define PAGE_COPY_NOEXEC PAGE_COPY
434+# define PAGE_READONLY_NOEXEC PAGE_READONLY
435+#endif
436+
437 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
438
439 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
440diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
441index ebc3c89..20cfa63 100644
442--- a/arch/alpha/kernel/module.c
443+++ b/arch/alpha/kernel/module.c
444@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
445
446 /* The small sections were sorted to the end of the segment.
447 The following should definitely cover them. */
448- gp = (u64)me->module_core + me->core_size - 0x8000;
449+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
450 got = sechdrs[me->arch.gotsecindex].sh_addr;
451
452 for (i = 0; i < n; i++) {
453diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
454index a94e49c..d71dd44 100644
455--- a/arch/alpha/kernel/osf_sys.c
456+++ b/arch/alpha/kernel/osf_sys.c
457@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
458 /* At this point: (!vma || addr < vma->vm_end). */
459 if (limit - len < addr)
460 return -ENOMEM;
461- if (!vma || addr + len <= vma->vm_start)
462+ if (check_heap_stack_gap(vma, addr, len))
463 return addr;
464 addr = vma->vm_end;
465 vma = vma->vm_next;
466@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
467 merely specific addresses, but regions of memory -- perhaps
468 this feature should be incorporated into all ports? */
469
470+#ifdef CONFIG_PAX_RANDMMAP
471+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
472+#endif
473+
474 if (addr) {
475 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
476 if (addr != (unsigned long) -ENOMEM)
477@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
478 }
479
480 /* Next, try allocating at TASK_UNMAPPED_BASE. */
481- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
482- len, limit);
483+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
484+
485 if (addr != (unsigned long) -ENOMEM)
486 return addr;
487
488diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
489index 00a31de..2ded0f2 100644
490--- a/arch/alpha/mm/fault.c
491+++ b/arch/alpha/mm/fault.c
492@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
493 __reload_thread(pcb);
494 }
495
496+#ifdef CONFIG_PAX_PAGEEXEC
497+/*
498+ * PaX: decide what to do with offenders (regs->pc = fault address)
499+ *
500+ * returns 1 when task should be killed
501+ * 2 when patched PLT trampoline was detected
502+ * 3 when unpatched PLT trampoline was detected
503+ */
504+static int pax_handle_fetch_fault(struct pt_regs *regs)
505+{
506+
507+#ifdef CONFIG_PAX_EMUPLT
508+ int err;
509+
510+ do { /* PaX: patched PLT emulation #1 */
511+ unsigned int ldah, ldq, jmp;
512+
513+ err = get_user(ldah, (unsigned int *)regs->pc);
514+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
515+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
516+
517+ if (err)
518+ break;
519+
520+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
521+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
522+ jmp == 0x6BFB0000U)
523+ {
524+ unsigned long r27, addr;
525+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
526+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
527+
528+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
529+ err = get_user(r27, (unsigned long *)addr);
530+ if (err)
531+ break;
532+
533+ regs->r27 = r27;
534+ regs->pc = r27;
535+ return 2;
536+ }
537+ } while (0);
538+
539+ do { /* PaX: patched PLT emulation #2 */
540+ unsigned int ldah, lda, br;
541+
542+ err = get_user(ldah, (unsigned int *)regs->pc);
543+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
544+ err |= get_user(br, (unsigned int *)(regs->pc+8));
545+
546+ if (err)
547+ break;
548+
549+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
550+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
551+ (br & 0xFFE00000U) == 0xC3E00000U)
552+ {
553+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
554+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
555+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
556+
557+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
558+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
559+ return 2;
560+ }
561+ } while (0);
562+
563+ do { /* PaX: unpatched PLT emulation */
564+ unsigned int br;
565+
566+ err = get_user(br, (unsigned int *)regs->pc);
567+
568+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
569+ unsigned int br2, ldq, nop, jmp;
570+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
571+
572+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
573+ err = get_user(br2, (unsigned int *)addr);
574+ err |= get_user(ldq, (unsigned int *)(addr+4));
575+ err |= get_user(nop, (unsigned int *)(addr+8));
576+ err |= get_user(jmp, (unsigned int *)(addr+12));
577+ err |= get_user(resolver, (unsigned long *)(addr+16));
578+
579+ if (err)
580+ break;
581+
582+ if (br2 == 0xC3600000U &&
583+ ldq == 0xA77B000CU &&
584+ nop == 0x47FF041FU &&
585+ jmp == 0x6B7B0000U)
586+ {
587+ regs->r28 = regs->pc+4;
588+ regs->r27 = addr+16;
589+ regs->pc = resolver;
590+ return 3;
591+ }
592+ }
593+ } while (0);
594+#endif
595+
596+ return 1;
597+}
598+
599+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
600+{
601+ unsigned long i;
602+
603+ printk(KERN_ERR "PAX: bytes at PC: ");
604+ for (i = 0; i < 5; i++) {
605+ unsigned int c;
606+ if (get_user(c, (unsigned int *)pc+i))
607+ printk(KERN_CONT "???????? ");
608+ else
609+ printk(KERN_CONT "%08x ", c);
610+ }
611+ printk("\n");
612+}
613+#endif
614
615 /*
616 * This routine handles page faults. It determines the address,
617@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
618 good_area:
619 si_code = SEGV_ACCERR;
620 if (cause < 0) {
621- if (!(vma->vm_flags & VM_EXEC))
622+ if (!(vma->vm_flags & VM_EXEC)) {
623+
624+#ifdef CONFIG_PAX_PAGEEXEC
625+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
626+ goto bad_area;
627+
628+ up_read(&mm->mmap_sem);
629+ switch (pax_handle_fetch_fault(regs)) {
630+
631+#ifdef CONFIG_PAX_EMUPLT
632+ case 2:
633+ case 3:
634+ return;
635+#endif
636+
637+ }
638+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
639+ do_group_exit(SIGKILL);
640+#else
641 goto bad_area;
642+#endif
643+
644+ }
645 } else if (!cause) {
646 /* Allow reads even for write-only mappings */
647 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
648diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
649index 6aac3f5..265536b 100644
650--- a/arch/arm/include/asm/elf.h
651+++ b/arch/arm/include/asm/elf.h
652@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
653 the loader. We need to make sure that it is out of the way of the program
654 that it will "exec", and that there is sufficient room for the brk. */
655
656-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
657+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
658+
659+#ifdef CONFIG_PAX_ASLR
660+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
661+
662+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
663+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
664+#endif
665
666 /* When the program starts, a1 contains a pointer to a function to be
667 registered with atexit, as per the SVR4 ABI. A value of 0 means we
668diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
669index c019949..388fdd1 100644
670--- a/arch/arm/include/asm/kmap_types.h
671+++ b/arch/arm/include/asm/kmap_types.h
672@@ -19,6 +19,7 @@ enum km_type {
673 KM_SOFTIRQ0,
674 KM_SOFTIRQ1,
675 KM_L2_CACHE,
676+ KM_CLEARPAGE,
677 KM_TYPE_NR
678 };
679
680diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
681index 1d6bd40..fba0cb9 100644
682--- a/arch/arm/include/asm/uaccess.h
683+++ b/arch/arm/include/asm/uaccess.h
684@@ -22,6 +22,8 @@
685 #define VERIFY_READ 0
686 #define VERIFY_WRITE 1
687
688+extern void check_object_size(const void *ptr, unsigned long n, bool to);
689+
690 /*
691 * The exception table consists of pairs of addresses: the first is the
692 * address of an instruction that is allowed to fault, and the second is
693@@ -387,8 +389,23 @@ do { \
694
695
696 #ifdef CONFIG_MMU
697-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
698-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
699+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
700+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
701+
702+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
703+{
704+ if (!__builtin_constant_p(n))
705+ check_object_size(to, n, false);
706+ return ___copy_from_user(to, from, n);
707+}
708+
709+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
710+{
711+ if (!__builtin_constant_p(n))
712+ check_object_size(from, n, true);
713+ return ___copy_to_user(to, from, n);
714+}
715+
716 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
717 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
718 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
719@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
720
721 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
722 {
723+ if ((long)n < 0)
724+ return n;
725+
726 if (access_ok(VERIFY_READ, from, n))
727 n = __copy_from_user(to, from, n);
728 else /* security hole - plug it */
729@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
730
731 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
732 {
733+ if ((long)n < 0)
734+ return n;
735+
736 if (access_ok(VERIFY_WRITE, to, n))
737 n = __copy_to_user(to, from, n);
738 return n;
739diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
740index 0e62770..e2c2cd6 100644
741--- a/arch/arm/kernel/armksyms.c
742+++ b/arch/arm/kernel/armksyms.c
743@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
744 #ifdef CONFIG_MMU
745 EXPORT_SYMBOL(copy_page);
746
747-EXPORT_SYMBOL(__copy_from_user);
748-EXPORT_SYMBOL(__copy_to_user);
749+EXPORT_SYMBOL(___copy_from_user);
750+EXPORT_SYMBOL(___copy_to_user);
751 EXPORT_SYMBOL(__clear_user);
752
753 EXPORT_SYMBOL(__get_user_1);
754diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
755index ba8ccfe..2dc34dc 100644
756--- a/arch/arm/kernel/kgdb.c
757+++ b/arch/arm/kernel/kgdb.c
758@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
759 * and we handle the normal undef case within the do_undefinstr
760 * handler.
761 */
762-struct kgdb_arch arch_kgdb_ops = {
763+const struct kgdb_arch arch_kgdb_ops = {
764 #ifndef __ARMEB__
765 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
766 #else /* ! __ARMEB__ */
767diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
768index 3f361a7..6e806e1 100644
769--- a/arch/arm/kernel/traps.c
770+++ b/arch/arm/kernel/traps.c
771@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
772
773 DEFINE_SPINLOCK(die_lock);
774
775+extern void gr_handle_kernel_exploit(void);
776+
777 /*
778 * This function is protected against re-entrancy.
779 */
780@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
781 if (panic_on_oops)
782 panic("Fatal exception");
783
784+ gr_handle_kernel_exploit();
785+
786 do_exit(SIGSEGV);
787 }
788
789diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
790index e4fe124..0fc246b 100644
791--- a/arch/arm/lib/copy_from_user.S
792+++ b/arch/arm/lib/copy_from_user.S
793@@ -16,7 +16,7 @@
794 /*
795 * Prototype:
796 *
797- * size_t __copy_from_user(void *to, const void *from, size_t n)
798+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
799 *
800 * Purpose:
801 *
802@@ -84,11 +84,11 @@
803
804 .text
805
806-ENTRY(__copy_from_user)
807+ENTRY(___copy_from_user)
808
809 #include "copy_template.S"
810
811-ENDPROC(__copy_from_user)
812+ENDPROC(___copy_from_user)
813
814 .section .fixup,"ax"
815 .align 0
816diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
817index 1a71e15..ac7b258 100644
818--- a/arch/arm/lib/copy_to_user.S
819+++ b/arch/arm/lib/copy_to_user.S
820@@ -16,7 +16,7 @@
821 /*
822 * Prototype:
823 *
824- * size_t __copy_to_user(void *to, const void *from, size_t n)
825+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
826 *
827 * Purpose:
828 *
829@@ -88,11 +88,11 @@
830 .text
831
832 ENTRY(__copy_to_user_std)
833-WEAK(__copy_to_user)
834+WEAK(___copy_to_user)
835
836 #include "copy_template.S"
837
838-ENDPROC(__copy_to_user)
839+ENDPROC(___copy_to_user)
840
841 .section .fixup,"ax"
842 .align 0
843diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
844index ffdd274..91017b6 100644
845--- a/arch/arm/lib/uaccess.S
846+++ b/arch/arm/lib/uaccess.S
847@@ -19,7 +19,7 @@
848
849 #define PAGE_SHIFT 12
850
851-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
852+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
853 * Purpose : copy a block to user memory from kernel memory
854 * Params : to - user memory
855 * : from - kernel memory
856@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
857 sub r2, r2, ip
858 b .Lc2u_dest_aligned
859
860-ENTRY(__copy_to_user)
861+ENTRY(___copy_to_user)
862 stmfd sp!, {r2, r4 - r7, lr}
863 cmp r2, #4
864 blt .Lc2u_not_enough
865@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
866 ldrgtb r3, [r1], #0
867 USER( strgtbt r3, [r0], #1) @ May fault
868 b .Lc2u_finished
869-ENDPROC(__copy_to_user)
870+ENDPROC(___copy_to_user)
871
872 .section .fixup,"ax"
873 .align 0
874 9001: ldmfd sp!, {r0, r4 - r7, pc}
875 .previous
876
877-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
878+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
879 * Purpose : copy a block from user memory to kernel memory
880 * Params : to - kernel memory
881 * : from - user memory
882@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
883 sub r2, r2, ip
884 b .Lcfu_dest_aligned
885
886-ENTRY(__copy_from_user)
887+ENTRY(___copy_from_user)
888 stmfd sp!, {r0, r2, r4 - r7, lr}
889 cmp r2, #4
890 blt .Lcfu_not_enough
891@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
892 USER( ldrgtbt r3, [r1], #1) @ May fault
893 strgtb r3, [r0], #1
894 b .Lcfu_finished
895-ENDPROC(__copy_from_user)
896+ENDPROC(___copy_from_user)
897
898 .section .fixup,"ax"
899 .align 0
900diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
901index 6b967ff..67d5b2b 100644
902--- a/arch/arm/lib/uaccess_with_memcpy.c
903+++ b/arch/arm/lib/uaccess_with_memcpy.c
904@@ -97,7 +97,7 @@ out:
905 }
906
907 unsigned long
908-__copy_to_user(void __user *to, const void *from, unsigned long n)
909+___copy_to_user(void __user *to, const void *from, unsigned long n)
910 {
911 /*
912 * This test is stubbed out of the main function above to keep
913diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
914index 4028724..beec230 100644
915--- a/arch/arm/mach-at91/pm.c
916+++ b/arch/arm/mach-at91/pm.c
917@@ -348,7 +348,7 @@ static void at91_pm_end(void)
918 }
919
920
921-static struct platform_suspend_ops at91_pm_ops ={
922+static const struct platform_suspend_ops at91_pm_ops ={
923 .valid = at91_pm_valid_state,
924 .begin = at91_pm_begin,
925 .enter = at91_pm_enter,
926diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
927index 5218943..0a34552 100644
928--- a/arch/arm/mach-omap1/pm.c
929+++ b/arch/arm/mach-omap1/pm.c
930@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
931
932
933
934-static struct platform_suspend_ops omap_pm_ops ={
935+static const struct platform_suspend_ops omap_pm_ops ={
936 .prepare = omap_pm_prepare,
937 .enter = omap_pm_enter,
938 .finish = omap_pm_finish,
939diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
940index bff5c4e..d4c649b 100644
941--- a/arch/arm/mach-omap2/pm24xx.c
942+++ b/arch/arm/mach-omap2/pm24xx.c
943@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
944 enable_hlt();
945 }
946
947-static struct platform_suspend_ops omap_pm_ops = {
948+static const struct platform_suspend_ops omap_pm_ops = {
949 .prepare = omap2_pm_prepare,
950 .enter = omap2_pm_enter,
951 .finish = omap2_pm_finish,
952diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
953index 8946319..7d3e661 100644
954--- a/arch/arm/mach-omap2/pm34xx.c
955+++ b/arch/arm/mach-omap2/pm34xx.c
956@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
957 return;
958 }
959
960-static struct platform_suspend_ops omap_pm_ops = {
961+static const struct platform_suspend_ops omap_pm_ops = {
962 .begin = omap3_pm_begin,
963 .end = omap3_pm_end,
964 .prepare = omap3_pm_prepare,
965diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
966index b3d8d53..6e68ebc 100644
967--- a/arch/arm/mach-pnx4008/pm.c
968+++ b/arch/arm/mach-pnx4008/pm.c
969@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
970 (state == PM_SUSPEND_MEM);
971 }
972
973-static struct platform_suspend_ops pnx4008_pm_ops = {
974+static const struct platform_suspend_ops pnx4008_pm_ops = {
975 .enter = pnx4008_pm_enter,
976 .valid = pnx4008_pm_valid,
977 };
978diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
979index 7693355..9beb00a 100644
980--- a/arch/arm/mach-pxa/pm.c
981+++ b/arch/arm/mach-pxa/pm.c
982@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
983 pxa_cpu_pm_fns->finish();
984 }
985
986-static struct platform_suspend_ops pxa_pm_ops = {
987+static const struct platform_suspend_ops pxa_pm_ops = {
988 .valid = pxa_pm_valid,
989 .enter = pxa_pm_enter,
990 .prepare = pxa_pm_prepare,
991diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
992index 629e05d..06be589 100644
993--- a/arch/arm/mach-pxa/sharpsl_pm.c
994+++ b/arch/arm/mach-pxa/sharpsl_pm.c
995@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
996 }
997
998 #ifdef CONFIG_PM
999-static struct platform_suspend_ops sharpsl_pm_ops = {
1000+static const struct platform_suspend_ops sharpsl_pm_ops = {
1001 .prepare = pxa_pm_prepare,
1002 .finish = pxa_pm_finish,
1003 .enter = corgi_pxa_pm_enter,
1004diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1005index c83fdc8..ab9fc44 100644
1006--- a/arch/arm/mach-sa1100/pm.c
1007+++ b/arch/arm/mach-sa1100/pm.c
1008@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1009 return virt_to_phys(sp);
1010 }
1011
1012-static struct platform_suspend_ops sa11x0_pm_ops = {
1013+static const struct platform_suspend_ops sa11x0_pm_ops = {
1014 .enter = sa11x0_pm_enter,
1015 .valid = suspend_valid_only_mem,
1016 };
1017diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1018index 3191cd6..c0739db 100644
1019--- a/arch/arm/mm/fault.c
1020+++ b/arch/arm/mm/fault.c
1021@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1022 }
1023 #endif
1024
1025+#ifdef CONFIG_PAX_PAGEEXEC
1026+ if (fsr & FSR_LNX_PF) {
1027+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1028+ do_group_exit(SIGKILL);
1029+ }
1030+#endif
1031+
1032 tsk->thread.address = addr;
1033 tsk->thread.error_code = fsr;
1034 tsk->thread.trap_no = 14;
1035@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1036 }
1037 #endif /* CONFIG_MMU */
1038
1039+#ifdef CONFIG_PAX_PAGEEXEC
1040+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1041+{
1042+ long i;
1043+
1044+ printk(KERN_ERR "PAX: bytes at PC: ");
1045+ for (i = 0; i < 20; i++) {
1046+ unsigned char c;
1047+ if (get_user(c, (__force unsigned char __user *)pc+i))
1048+ printk(KERN_CONT "?? ");
1049+ else
1050+ printk(KERN_CONT "%02x ", c);
1051+ }
1052+ printk("\n");
1053+
1054+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1055+ for (i = -1; i < 20; i++) {
1056+ unsigned long c;
1057+ if (get_user(c, (__force unsigned long __user *)sp+i))
1058+ printk(KERN_CONT "???????? ");
1059+ else
1060+ printk(KERN_CONT "%08lx ", c);
1061+ }
1062+ printk("\n");
1063+}
1064+#endif
1065+
1066 /*
1067 * First Level Translation Fault Handler
1068 *
1069diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1070index f5abc51..7ec524c 100644
1071--- a/arch/arm/mm/mmap.c
1072+++ b/arch/arm/mm/mmap.c
1073@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1074 if (len > TASK_SIZE)
1075 return -ENOMEM;
1076
1077+#ifdef CONFIG_PAX_RANDMMAP
1078+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1079+#endif
1080+
1081 if (addr) {
1082 if (do_align)
1083 addr = COLOUR_ALIGN(addr, pgoff);
1084@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1085 addr = PAGE_ALIGN(addr);
1086
1087 vma = find_vma(mm, addr);
1088- if (TASK_SIZE - len >= addr &&
1089- (!vma || addr + len <= vma->vm_start))
1090+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1091 return addr;
1092 }
1093 if (len > mm->cached_hole_size) {
1094- start_addr = addr = mm->free_area_cache;
1095+ start_addr = addr = mm->free_area_cache;
1096 } else {
1097- start_addr = addr = TASK_UNMAPPED_BASE;
1098- mm->cached_hole_size = 0;
1099+ start_addr = addr = mm->mmap_base;
1100+ mm->cached_hole_size = 0;
1101 }
1102
1103 full_search:
1104@@ -94,14 +97,14 @@ full_search:
1105 * Start a new search - just in case we missed
1106 * some holes.
1107 */
1108- if (start_addr != TASK_UNMAPPED_BASE) {
1109- start_addr = addr = TASK_UNMAPPED_BASE;
1110+ if (start_addr != mm->mmap_base) {
1111+ start_addr = addr = mm->mmap_base;
1112 mm->cached_hole_size = 0;
1113 goto full_search;
1114 }
1115 return -ENOMEM;
1116 }
1117- if (!vma || addr + len <= vma->vm_start) {
1118+ if (check_heap_stack_gap(vma, addr, len)) {
1119 /*
1120 * Remember the place where we stopped the search:
1121 */
1122diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1123index 8d97db2..b66cfa5 100644
1124--- a/arch/arm/plat-s3c/pm.c
1125+++ b/arch/arm/plat-s3c/pm.c
1126@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1127 s3c_pm_check_cleanup();
1128 }
1129
1130-static struct platform_suspend_ops s3c_pm_ops = {
1131+static const struct platform_suspend_ops s3c_pm_ops = {
1132 .enter = s3c_pm_enter,
1133 .prepare = s3c_pm_prepare,
1134 .finish = s3c_pm_finish,
1135diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1136index d5d1d41..856e2ed 100644
1137--- a/arch/avr32/include/asm/elf.h
1138+++ b/arch/avr32/include/asm/elf.h
1139@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1140 the loader. We need to make sure that it is out of the way of the program
1141 that it will "exec", and that there is sufficient room for the brk. */
1142
1143-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1144+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1145
1146+#ifdef CONFIG_PAX_ASLR
1147+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1148+
1149+#define PAX_DELTA_MMAP_LEN 15
1150+#define PAX_DELTA_STACK_LEN 15
1151+#endif
1152
1153 /* This yields a mask that user programs can use to figure out what
1154 instruction set this CPU supports. This could be done in user space,
1155diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1156index b7f5c68..556135c 100644
1157--- a/arch/avr32/include/asm/kmap_types.h
1158+++ b/arch/avr32/include/asm/kmap_types.h
1159@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1160 D(11) KM_IRQ1,
1161 D(12) KM_SOFTIRQ0,
1162 D(13) KM_SOFTIRQ1,
1163-D(14) KM_TYPE_NR
1164+D(14) KM_CLEARPAGE,
1165+D(15) KM_TYPE_NR
1166 };
1167
1168 #undef D
1169diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1170index f021edf..32d680e 100644
1171--- a/arch/avr32/mach-at32ap/pm.c
1172+++ b/arch/avr32/mach-at32ap/pm.c
1173@@ -176,7 +176,7 @@ out:
1174 return 0;
1175 }
1176
1177-static struct platform_suspend_ops avr32_pm_ops = {
1178+static const struct platform_suspend_ops avr32_pm_ops = {
1179 .valid = avr32_pm_valid_state,
1180 .enter = avr32_pm_enter,
1181 };
1182diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1183index b61d86d..e292c7f 100644
1184--- a/arch/avr32/mm/fault.c
1185+++ b/arch/avr32/mm/fault.c
1186@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1187
1188 int exception_trace = 1;
1189
1190+#ifdef CONFIG_PAX_PAGEEXEC
1191+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1192+{
1193+ unsigned long i;
1194+
1195+ printk(KERN_ERR "PAX: bytes at PC: ");
1196+ for (i = 0; i < 20; i++) {
1197+ unsigned char c;
1198+ if (get_user(c, (unsigned char *)pc+i))
1199+ printk(KERN_CONT "???????? ");
1200+ else
1201+ printk(KERN_CONT "%02x ", c);
1202+ }
1203+ printk("\n");
1204+}
1205+#endif
1206+
1207 /*
1208 * This routine handles page faults. It determines the address and the
1209 * problem, and then passes it off to one of the appropriate routines.
1210@@ -157,6 +174,16 @@ bad_area:
1211 up_read(&mm->mmap_sem);
1212
1213 if (user_mode(regs)) {
1214+
1215+#ifdef CONFIG_PAX_PAGEEXEC
1216+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1217+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1218+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1219+ do_group_exit(SIGKILL);
1220+ }
1221+ }
1222+#endif
1223+
1224 if (exception_trace && printk_ratelimit())
1225 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1226 "sp %08lx ecr %lu\n",
1227diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1228index cce79d0..c406c85 100644
1229--- a/arch/blackfin/kernel/kgdb.c
1230+++ b/arch/blackfin/kernel/kgdb.c
1231@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1232 return -1; /* this means that we do not want to exit from the handler */
1233 }
1234
1235-struct kgdb_arch arch_kgdb_ops = {
1236+const struct kgdb_arch arch_kgdb_ops = {
1237 .gdb_bpt_instr = {0xa1},
1238 #ifdef CONFIG_SMP
1239 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1240diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1241index 8837be4..b2fb413 100644
1242--- a/arch/blackfin/mach-common/pm.c
1243+++ b/arch/blackfin/mach-common/pm.c
1244@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1245 return 0;
1246 }
1247
1248-struct platform_suspend_ops bfin_pm_ops = {
1249+const struct platform_suspend_ops bfin_pm_ops = {
1250 .enter = bfin_pm_enter,
1251 .valid = bfin_pm_valid,
1252 };
1253diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1254index f8e16b2..c73ff79 100644
1255--- a/arch/frv/include/asm/kmap_types.h
1256+++ b/arch/frv/include/asm/kmap_types.h
1257@@ -23,6 +23,7 @@ enum km_type {
1258 KM_IRQ1,
1259 KM_SOFTIRQ0,
1260 KM_SOFTIRQ1,
1261+ KM_CLEARPAGE,
1262 KM_TYPE_NR
1263 };
1264
1265diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1266index 385fd30..6c3d97e 100644
1267--- a/arch/frv/mm/elf-fdpic.c
1268+++ b/arch/frv/mm/elf-fdpic.c
1269@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1270 if (addr) {
1271 addr = PAGE_ALIGN(addr);
1272 vma = find_vma(current->mm, addr);
1273- if (TASK_SIZE - len >= addr &&
1274- (!vma || addr + len <= vma->vm_start))
1275+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1276 goto success;
1277 }
1278
1279@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1280 for (; vma; vma = vma->vm_next) {
1281 if (addr > limit)
1282 break;
1283- if (addr + len <= vma->vm_start)
1284+ if (check_heap_stack_gap(vma, addr, len))
1285 goto success;
1286 addr = vma->vm_end;
1287 }
1288@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1289 for (; vma; vma = vma->vm_next) {
1290 if (addr > limit)
1291 break;
1292- if (addr + len <= vma->vm_start)
1293+ if (check_heap_stack_gap(vma, addr, len))
1294 goto success;
1295 addr = vma->vm_end;
1296 }
1297diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1298index e4a80d8..11a7ea1 100644
1299--- a/arch/ia64/hp/common/hwsw_iommu.c
1300+++ b/arch/ia64/hp/common/hwsw_iommu.c
1301@@ -17,7 +17,7 @@
1302 #include <linux/swiotlb.h>
1303 #include <asm/machvec.h>
1304
1305-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1306+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1307
1308 /* swiotlb declarations & definitions: */
1309 extern int swiotlb_late_init_with_default_size (size_t size);
1310@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1311 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1312 }
1313
1314-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1315+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1316 {
1317 if (use_swiotlb(dev))
1318 return &swiotlb_dma_ops;
1319diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1320index 01ae69b..35752fd 100644
1321--- a/arch/ia64/hp/common/sba_iommu.c
1322+++ b/arch/ia64/hp/common/sba_iommu.c
1323@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1324 },
1325 };
1326
1327-extern struct dma_map_ops swiotlb_dma_ops;
1328+extern const struct dma_map_ops swiotlb_dma_ops;
1329
1330 static int __init
1331 sba_init(void)
1332@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1333
1334 __setup("sbapagesize=",sba_page_override);
1335
1336-struct dma_map_ops sba_dma_ops = {
1337+const struct dma_map_ops sba_dma_ops = {
1338 .alloc_coherent = sba_alloc_coherent,
1339 .free_coherent = sba_free_coherent,
1340 .map_page = sba_map_page,
1341diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1342index c69552b..c7122f4 100644
1343--- a/arch/ia64/ia32/binfmt_elf32.c
1344+++ b/arch/ia64/ia32/binfmt_elf32.c
1345@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1346
1347 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1348
1349+#ifdef CONFIG_PAX_ASLR
1350+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1351+
1352+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1353+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1354+#endif
1355+
1356 /* Ugly but avoids duplication */
1357 #include "../../../fs/binfmt_elf.c"
1358
1359diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1360index 0f15349..26b3429 100644
1361--- a/arch/ia64/ia32/ia32priv.h
1362+++ b/arch/ia64/ia32/ia32priv.h
1363@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1364 #define ELF_DATA ELFDATA2LSB
1365 #define ELF_ARCH EM_386
1366
1367-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1368+#ifdef CONFIG_PAX_RANDUSTACK
1369+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1370+#else
1371+#define __IA32_DELTA_STACK 0UL
1372+#endif
1373+
1374+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1375+
1376 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1377 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1378
1379diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1380index 8d3c79c..71b3af6 100644
1381--- a/arch/ia64/include/asm/dma-mapping.h
1382+++ b/arch/ia64/include/asm/dma-mapping.h
1383@@ -12,7 +12,7 @@
1384
1385 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1386
1387-extern struct dma_map_ops *dma_ops;
1388+extern const struct dma_map_ops *dma_ops;
1389 extern struct ia64_machine_vector ia64_mv;
1390 extern void set_iommu_machvec(void);
1391
1392@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1393 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1394 dma_addr_t *daddr, gfp_t gfp)
1395 {
1396- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1397+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1398 void *caddr;
1399
1400 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1401@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1402 static inline void dma_free_coherent(struct device *dev, size_t size,
1403 void *caddr, dma_addr_t daddr)
1404 {
1405- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1406+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1407 debug_dma_free_coherent(dev, size, caddr, daddr);
1408 ops->free_coherent(dev, size, caddr, daddr);
1409 }
1410@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1411
1412 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1413 {
1414- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1415+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1416 return ops->mapping_error(dev, daddr);
1417 }
1418
1419 static inline int dma_supported(struct device *dev, u64 mask)
1420 {
1421- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1422+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1423 return ops->dma_supported(dev, mask);
1424 }
1425
1426diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1427index 86eddee..b116bb4 100644
1428--- a/arch/ia64/include/asm/elf.h
1429+++ b/arch/ia64/include/asm/elf.h
1430@@ -43,6 +43,13 @@
1431 */
1432 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1433
1434+#ifdef CONFIG_PAX_ASLR
1435+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1436+
1437+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1438+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1439+#endif
1440+
1441 #define PT_IA_64_UNWIND 0x70000001
1442
1443 /* IA-64 relocations: */
1444diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1445index 367d299..9ad4279 100644
1446--- a/arch/ia64/include/asm/machvec.h
1447+++ b/arch/ia64/include/asm/machvec.h
1448@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1449 /* DMA-mapping interface: */
1450 typedef void ia64_mv_dma_init (void);
1451 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1452-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1453+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1454
1455 /*
1456 * WARNING: The legacy I/O space is _architected_. Platforms are
1457@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1458 # endif /* CONFIG_IA64_GENERIC */
1459
1460 extern void swiotlb_dma_init(void);
1461-extern struct dma_map_ops *dma_get_ops(struct device *);
1462+extern const struct dma_map_ops *dma_get_ops(struct device *);
1463
1464 /*
1465 * Define default versions so we can extend machvec for new platforms without having
1466diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1467index 8840a69..cdb63d9 100644
1468--- a/arch/ia64/include/asm/pgtable.h
1469+++ b/arch/ia64/include/asm/pgtable.h
1470@@ -12,7 +12,7 @@
1471 * David Mosberger-Tang <davidm@hpl.hp.com>
1472 */
1473
1474-
1475+#include <linux/const.h>
1476 #include <asm/mman.h>
1477 #include <asm/page.h>
1478 #include <asm/processor.h>
1479@@ -143,6 +143,17 @@
1480 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1481 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1482 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1483+
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1486+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1487+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1488+#else
1489+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1490+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1491+# define PAGE_COPY_NOEXEC PAGE_COPY
1492+#endif
1493+
1494 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1495 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1496 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1497diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1498index 239ecdc..f94170e 100644
1499--- a/arch/ia64/include/asm/spinlock.h
1500+++ b/arch/ia64/include/asm/spinlock.h
1501@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1502 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1503
1504 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1505- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1506+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1507 }
1508
1509 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1510diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1511index 449c8c0..432a3d2 100644
1512--- a/arch/ia64/include/asm/uaccess.h
1513+++ b/arch/ia64/include/asm/uaccess.h
1514@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1515 const void *__cu_from = (from); \
1516 long __cu_len = (n); \
1517 \
1518- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1519+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1520 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1521 __cu_len; \
1522 })
1523@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1524 long __cu_len = (n); \
1525 \
1526 __chk_user_ptr(__cu_from); \
1527- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1528+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1529 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1530 __cu_len; \
1531 })
1532diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1533index f2c1600..969398a 100644
1534--- a/arch/ia64/kernel/dma-mapping.c
1535+++ b/arch/ia64/kernel/dma-mapping.c
1536@@ -3,7 +3,7 @@
1537 /* Set this to 1 if there is a HW IOMMU in the system */
1538 int iommu_detected __read_mostly;
1539
1540-struct dma_map_ops *dma_ops;
1541+const struct dma_map_ops *dma_ops;
1542 EXPORT_SYMBOL(dma_ops);
1543
1544 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1545@@ -16,7 +16,7 @@ static int __init dma_init(void)
1546 }
1547 fs_initcall(dma_init);
1548
1549-struct dma_map_ops *dma_get_ops(struct device *dev)
1550+const struct dma_map_ops *dma_get_ops(struct device *dev)
1551 {
1552 return dma_ops;
1553 }
1554diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1555index 1481b0a..e7d38ff 100644
1556--- a/arch/ia64/kernel/module.c
1557+++ b/arch/ia64/kernel/module.c
1558@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1559 void
1560 module_free (struct module *mod, void *module_region)
1561 {
1562- if (mod && mod->arch.init_unw_table &&
1563- module_region == mod->module_init) {
1564+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1565 unw_remove_unwind_table(mod->arch.init_unw_table);
1566 mod->arch.init_unw_table = NULL;
1567 }
1568@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1569 }
1570
1571 static inline int
1572+in_init_rx (const struct module *mod, uint64_t addr)
1573+{
1574+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1575+}
1576+
1577+static inline int
1578+in_init_rw (const struct module *mod, uint64_t addr)
1579+{
1580+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1581+}
1582+
1583+static inline int
1584 in_init (const struct module *mod, uint64_t addr)
1585 {
1586- return addr - (uint64_t) mod->module_init < mod->init_size;
1587+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1588+}
1589+
1590+static inline int
1591+in_core_rx (const struct module *mod, uint64_t addr)
1592+{
1593+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1594+}
1595+
1596+static inline int
1597+in_core_rw (const struct module *mod, uint64_t addr)
1598+{
1599+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1600 }
1601
1602 static inline int
1603 in_core (const struct module *mod, uint64_t addr)
1604 {
1605- return addr - (uint64_t) mod->module_core < mod->core_size;
1606+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1607 }
1608
1609 static inline int
1610@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1611 break;
1612
1613 case RV_BDREL:
1614- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1615+ if (in_init_rx(mod, val))
1616+ val -= (uint64_t) mod->module_init_rx;
1617+ else if (in_init_rw(mod, val))
1618+ val -= (uint64_t) mod->module_init_rw;
1619+ else if (in_core_rx(mod, val))
1620+ val -= (uint64_t) mod->module_core_rx;
1621+ else if (in_core_rw(mod, val))
1622+ val -= (uint64_t) mod->module_core_rw;
1623 break;
1624
1625 case RV_LTV:
1626@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1627 * addresses have been selected...
1628 */
1629 uint64_t gp;
1630- if (mod->core_size > MAX_LTOFF)
1631+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1632 /*
1633 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1634 * at the end of the module.
1635 */
1636- gp = mod->core_size - MAX_LTOFF / 2;
1637+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1638 else
1639- gp = mod->core_size / 2;
1640- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1641+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1642+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1643 mod->arch.gp = gp;
1644 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1645 }
1646diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1647index f6b1ff0..de773fb 100644
1648--- a/arch/ia64/kernel/pci-dma.c
1649+++ b/arch/ia64/kernel/pci-dma.c
1650@@ -43,7 +43,7 @@ struct device fallback_dev = {
1651 .dma_mask = &fallback_dev.coherent_dma_mask,
1652 };
1653
1654-extern struct dma_map_ops intel_dma_ops;
1655+extern const struct dma_map_ops intel_dma_ops;
1656
1657 static int __init pci_iommu_init(void)
1658 {
1659@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1660 }
1661 EXPORT_SYMBOL(iommu_dma_supported);
1662
1663+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1664+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1665+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1666+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1667+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1668+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1669+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1670+
1671+static const struct dma_map_ops intel_iommu_dma_ops = {
1672+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1673+ .alloc_coherent = intel_alloc_coherent,
1674+ .free_coherent = intel_free_coherent,
1675+ .map_sg = intel_map_sg,
1676+ .unmap_sg = intel_unmap_sg,
1677+ .map_page = intel_map_page,
1678+ .unmap_page = intel_unmap_page,
1679+ .mapping_error = intel_mapping_error,
1680+
1681+ .sync_single_for_cpu = machvec_dma_sync_single,
1682+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1683+ .sync_single_for_device = machvec_dma_sync_single,
1684+ .sync_sg_for_device = machvec_dma_sync_sg,
1685+ .dma_supported = iommu_dma_supported,
1686+};
1687+
1688 void __init pci_iommu_alloc(void)
1689 {
1690- dma_ops = &intel_dma_ops;
1691-
1692- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1693- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1694- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1695- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1696- dma_ops->dma_supported = iommu_dma_supported;
1697+ dma_ops = &intel_iommu_dma_ops;
1698
1699 /*
1700 * The order of these functions is important for
1701diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1702index 285aae8..61dbab6 100644
1703--- a/arch/ia64/kernel/pci-swiotlb.c
1704+++ b/arch/ia64/kernel/pci-swiotlb.c
1705@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1706 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1707 }
1708
1709-struct dma_map_ops swiotlb_dma_ops = {
1710+const struct dma_map_ops swiotlb_dma_ops = {
1711 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1712 .free_coherent = swiotlb_free_coherent,
1713 .map_page = swiotlb_map_page,
1714diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1715index 609d500..7dde2a8 100644
1716--- a/arch/ia64/kernel/sys_ia64.c
1717+++ b/arch/ia64/kernel/sys_ia64.c
1718@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1719 if (REGION_NUMBER(addr) == RGN_HPAGE)
1720 addr = 0;
1721 #endif
1722+
1723+#ifdef CONFIG_PAX_RANDMMAP
1724+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1725+ addr = mm->free_area_cache;
1726+ else
1727+#endif
1728+
1729 if (!addr)
1730 addr = mm->free_area_cache;
1731
1732@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1733 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1734 /* At this point: (!vma || addr < vma->vm_end). */
1735 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1736- if (start_addr != TASK_UNMAPPED_BASE) {
1737+ if (start_addr != mm->mmap_base) {
1738 /* Start a new search --- just in case we missed some holes. */
1739- addr = TASK_UNMAPPED_BASE;
1740+ addr = mm->mmap_base;
1741 goto full_search;
1742 }
1743 return -ENOMEM;
1744 }
1745- if (!vma || addr + len <= vma->vm_start) {
1746+ if (check_heap_stack_gap(vma, addr, len)) {
1747 /* Remember the address where we stopped this search: */
1748 mm->free_area_cache = addr + len;
1749 return addr;
1750diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1751index 8f06035..b3a5818 100644
1752--- a/arch/ia64/kernel/topology.c
1753+++ b/arch/ia64/kernel/topology.c
1754@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1755 return ret;
1756 }
1757
1758-static struct sysfs_ops cache_sysfs_ops = {
1759+static const struct sysfs_ops cache_sysfs_ops = {
1760 .show = cache_show
1761 };
1762
1763diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1764index 0a0c77b..8e55a81 100644
1765--- a/arch/ia64/kernel/vmlinux.lds.S
1766+++ b/arch/ia64/kernel/vmlinux.lds.S
1767@@ -190,7 +190,7 @@ SECTIONS
1768 /* Per-cpu data: */
1769 . = ALIGN(PERCPU_PAGE_SIZE);
1770 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1771- __phys_per_cpu_start = __per_cpu_load;
1772+ __phys_per_cpu_start = per_cpu_load;
1773 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1774 * into percpu page size
1775 */
1776diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1777index 19261a9..1611b7a 100644
1778--- a/arch/ia64/mm/fault.c
1779+++ b/arch/ia64/mm/fault.c
1780@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1781 return pte_present(pte);
1782 }
1783
1784+#ifdef CONFIG_PAX_PAGEEXEC
1785+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1786+{
1787+ unsigned long i;
1788+
1789+ printk(KERN_ERR "PAX: bytes at PC: ");
1790+ for (i = 0; i < 8; i++) {
1791+ unsigned int c;
1792+ if (get_user(c, (unsigned int *)pc+i))
1793+ printk(KERN_CONT "???????? ");
1794+ else
1795+ printk(KERN_CONT "%08x ", c);
1796+ }
1797+ printk("\n");
1798+}
1799+#endif
1800+
1801 void __kprobes
1802 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1803 {
1804@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1805 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1806 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1807
1808- if ((vma->vm_flags & mask) != mask)
1809+ if ((vma->vm_flags & mask) != mask) {
1810+
1811+#ifdef CONFIG_PAX_PAGEEXEC
1812+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1813+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1814+ goto bad_area;
1815+
1816+ up_read(&mm->mmap_sem);
1817+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1818+ do_group_exit(SIGKILL);
1819+ }
1820+#endif
1821+
1822 goto bad_area;
1823
1824+ }
1825+
1826 survive:
1827 /*
1828 * If for any reason at all we couldn't handle the fault, make
1829diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1830index b0f6157..a082bbc 100644
1831--- a/arch/ia64/mm/hugetlbpage.c
1832+++ b/arch/ia64/mm/hugetlbpage.c
1833@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1834 /* At this point: (!vmm || addr < vmm->vm_end). */
1835 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1836 return -ENOMEM;
1837- if (!vmm || (addr + len) <= vmm->vm_start)
1838+ if (check_heap_stack_gap(vmm, addr, len))
1839 return addr;
1840 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1841 }
1842diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1843index 1857766..05cc6a3 100644
1844--- a/arch/ia64/mm/init.c
1845+++ b/arch/ia64/mm/init.c
1846@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1847 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1848 vma->vm_end = vma->vm_start + PAGE_SIZE;
1849 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1850+
1851+#ifdef CONFIG_PAX_PAGEEXEC
1852+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1853+ vma->vm_flags &= ~VM_EXEC;
1854+
1855+#ifdef CONFIG_PAX_MPROTECT
1856+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1857+ vma->vm_flags &= ~VM_MAYEXEC;
1858+#endif
1859+
1860+ }
1861+#endif
1862+
1863 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1864 down_write(&current->mm->mmap_sem);
1865 if (insert_vm_struct(current->mm, vma)) {
1866diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1867index 98b6849..8046766 100644
1868--- a/arch/ia64/sn/pci/pci_dma.c
1869+++ b/arch/ia64/sn/pci/pci_dma.c
1870@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1871 return ret;
1872 }
1873
1874-static struct dma_map_ops sn_dma_ops = {
1875+static const struct dma_map_ops sn_dma_ops = {
1876 .alloc_coherent = sn_dma_alloc_coherent,
1877 .free_coherent = sn_dma_free_coherent,
1878 .map_page = sn_dma_map_page,
1879diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1880index 82abd15..d95ae5d 100644
1881--- a/arch/m32r/lib/usercopy.c
1882+++ b/arch/m32r/lib/usercopy.c
1883@@ -14,6 +14,9 @@
1884 unsigned long
1885 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1886 {
1887+ if ((long)n < 0)
1888+ return n;
1889+
1890 prefetch(from);
1891 if (access_ok(VERIFY_WRITE, to, n))
1892 __copy_user(to,from,n);
1893@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1894 unsigned long
1895 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1896 {
1897+ if ((long)n < 0)
1898+ return n;
1899+
1900 prefetchw(to);
1901 if (access_ok(VERIFY_READ, from, n))
1902 __copy_user_zeroing(to,from,n);
1903diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1904index 77f5021..2b1db8a 100644
1905--- a/arch/mips/Makefile
1906+++ b/arch/mips/Makefile
1907@@ -51,6 +51,8 @@ endif
1908 cflags-y := -ffunction-sections
1909 cflags-y += $(call cc-option, -mno-check-zero-division)
1910
1911+cflags-y += -Wno-sign-compare -Wno-extra
1912+
1913 ifdef CONFIG_32BIT
1914 ld-emul = $(32bit-emul)
1915 vmlinux-32 = vmlinux
1916diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1917index 632f986..fd0378d 100644
1918--- a/arch/mips/alchemy/devboards/pm.c
1919+++ b/arch/mips/alchemy/devboards/pm.c
1920@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1921
1922 }
1923
1924-static struct platform_suspend_ops db1x_pm_ops = {
1925+static const struct platform_suspend_ops db1x_pm_ops = {
1926 .valid = suspend_valid_only_mem,
1927 .begin = db1x_pm_begin,
1928 .enter = db1x_pm_enter,
1929diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1930index 7990694..4e93acf 100644
1931--- a/arch/mips/include/asm/elf.h
1932+++ b/arch/mips/include/asm/elf.h
1933@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1934 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1935 #endif
1936
1937+#ifdef CONFIG_PAX_ASLR
1938+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1939+
1940+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1941+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1942+#endif
1943+
1944 #endif /* _ASM_ELF_H */
1945diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1946index f266295..627cfff 100644
1947--- a/arch/mips/include/asm/page.h
1948+++ b/arch/mips/include/asm/page.h
1949@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1950 #ifdef CONFIG_CPU_MIPS32
1951 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1952 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1953- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1954+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1955 #else
1956 typedef struct { unsigned long long pte; } pte_t;
1957 #define pte_val(x) ((x).pte)
1958diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1959index e48c0bf..f3acf65 100644
1960--- a/arch/mips/include/asm/reboot.h
1961+++ b/arch/mips/include/asm/reboot.h
1962@@ -9,7 +9,7 @@
1963 #ifndef _ASM_REBOOT_H
1964 #define _ASM_REBOOT_H
1965
1966-extern void (*_machine_restart)(char *command);
1967-extern void (*_machine_halt)(void);
1968+extern void (*__noreturn _machine_restart)(char *command);
1969+extern void (*__noreturn _machine_halt)(void);
1970
1971 #endif /* _ASM_REBOOT_H */
1972diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1973index 83b5509..9fa24a23 100644
1974--- a/arch/mips/include/asm/system.h
1975+++ b/arch/mips/include/asm/system.h
1976@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1977 */
1978 #define __ARCH_WANT_UNLOCKED_CTXSW
1979
1980-extern unsigned long arch_align_stack(unsigned long sp);
1981+#define arch_align_stack(x) ((x) & ~0xfUL)
1982
1983 #endif /* _ASM_SYSTEM_H */
1984diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1985index 9fdd8bc..fcf9d68 100644
1986--- a/arch/mips/kernel/binfmt_elfn32.c
1987+++ b/arch/mips/kernel/binfmt_elfn32.c
1988@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1989 #undef ELF_ET_DYN_BASE
1990 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1991
1992+#ifdef CONFIG_PAX_ASLR
1993+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1994+
1995+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1996+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1997+#endif
1998+
1999 #include <asm/processor.h>
2000 #include <linux/module.h>
2001 #include <linux/elfcore.h>
2002diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2003index ff44823..cf0b48a 100644
2004--- a/arch/mips/kernel/binfmt_elfo32.c
2005+++ b/arch/mips/kernel/binfmt_elfo32.c
2006@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2007 #undef ELF_ET_DYN_BASE
2008 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2009
2010+#ifdef CONFIG_PAX_ASLR
2011+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2012+
2013+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2014+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2015+#endif
2016+
2017 #include <asm/processor.h>
2018
2019 /*
2020diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2021index 50c9bb8..efdd5f8 100644
2022--- a/arch/mips/kernel/kgdb.c
2023+++ b/arch/mips/kernel/kgdb.c
2024@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2025 return -1;
2026 }
2027
2028+/* cannot be const */
2029 struct kgdb_arch arch_kgdb_ops;
2030
2031 /*
2032diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2033index f3d73e1..bb3f57a 100644
2034--- a/arch/mips/kernel/process.c
2035+++ b/arch/mips/kernel/process.c
2036@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2037 out:
2038 return pc;
2039 }
2040-
2041-/*
2042- * Don't forget that the stack pointer must be aligned on a 8 bytes
2043- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2044- */
2045-unsigned long arch_align_stack(unsigned long sp)
2046-{
2047- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2048- sp -= get_random_int() & ~PAGE_MASK;
2049-
2050- return sp & ALMASK;
2051-}
2052diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2053index 060563a..7fbf310 100644
2054--- a/arch/mips/kernel/reset.c
2055+++ b/arch/mips/kernel/reset.c
2056@@ -19,8 +19,8 @@
2057 * So handle all using function pointers to machine specific
2058 * functions.
2059 */
2060-void (*_machine_restart)(char *command);
2061-void (*_machine_halt)(void);
2062+void (*__noreturn _machine_restart)(char *command);
2063+void (*__noreturn _machine_halt)(void);
2064 void (*pm_power_off)(void);
2065
2066 EXPORT_SYMBOL(pm_power_off);
2067@@ -29,16 +29,19 @@ void machine_restart(char *command)
2068 {
2069 if (_machine_restart)
2070 _machine_restart(command);
2071+ BUG();
2072 }
2073
2074 void machine_halt(void)
2075 {
2076 if (_machine_halt)
2077 _machine_halt();
2078+ BUG();
2079 }
2080
2081 void machine_power_off(void)
2082 {
2083 if (pm_power_off)
2084 pm_power_off();
2085+ BUG();
2086 }
2087diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2088index 3f7f466..3abe0b5 100644
2089--- a/arch/mips/kernel/syscall.c
2090+++ b/arch/mips/kernel/syscall.c
2091@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2092 do_color_align = 0;
2093 if (filp || (flags & MAP_SHARED))
2094 do_color_align = 1;
2095+
2096+#ifdef CONFIG_PAX_RANDMMAP
2097+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2098+#endif
2099+
2100 if (addr) {
2101 if (do_color_align)
2102 addr = COLOUR_ALIGN(addr, pgoff);
2103 else
2104 addr = PAGE_ALIGN(addr);
2105 vmm = find_vma(current->mm, addr);
2106- if (task_size - len >= addr &&
2107- (!vmm || addr + len <= vmm->vm_start))
2108+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2109 return addr;
2110 }
2111- addr = TASK_UNMAPPED_BASE;
2112+ addr = current->mm->mmap_base;
2113 if (do_color_align)
2114 addr = COLOUR_ALIGN(addr, pgoff);
2115 else
2116@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2117 /* At this point: (!vmm || addr < vmm->vm_end). */
2118 if (task_size - len < addr)
2119 return -ENOMEM;
2120- if (!vmm || addr + len <= vmm->vm_start)
2121+ if (check_heap_stack_gap(vmm, addr, len))
2122 return addr;
2123 addr = vmm->vm_end;
2124 if (do_color_align)
2125diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2126index e97a7a2..f18f5b0 100644
2127--- a/arch/mips/mm/fault.c
2128+++ b/arch/mips/mm/fault.c
2129@@ -26,6 +26,23 @@
2130 #include <asm/ptrace.h>
2131 #include <asm/highmem.h> /* For VMALLOC_END */
2132
2133+#ifdef CONFIG_PAX_PAGEEXEC
2134+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2135+{
2136+ unsigned long i;
2137+
2138+ printk(KERN_ERR "PAX: bytes at PC: ");
2139+ for (i = 0; i < 5; i++) {
2140+ unsigned int c;
2141+ if (get_user(c, (unsigned int *)pc+i))
2142+ printk(KERN_CONT "???????? ");
2143+ else
2144+ printk(KERN_CONT "%08x ", c);
2145+ }
2146+ printk("\n");
2147+}
2148+#endif
2149+
2150 /*
2151 * This routine handles page faults. It determines the address,
2152 * and the problem, and then passes it off to one of the appropriate
2153diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2154index 9c802eb..0592e41 100644
2155--- a/arch/parisc/include/asm/elf.h
2156+++ b/arch/parisc/include/asm/elf.h
2157@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2158
2159 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2160
2161+#ifdef CONFIG_PAX_ASLR
2162+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2163+
2164+#define PAX_DELTA_MMAP_LEN 16
2165+#define PAX_DELTA_STACK_LEN 16
2166+#endif
2167+
2168 /* This yields a mask that user programs can use to figure out what
2169 instruction set this CPU supports. This could be done in user space,
2170 but it's not easy, and we've already done it here. */
2171diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2172index a27d2e2..18fd845 100644
2173--- a/arch/parisc/include/asm/pgtable.h
2174+++ b/arch/parisc/include/asm/pgtable.h
2175@@ -207,6 +207,17 @@
2176 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2177 #define PAGE_COPY PAGE_EXECREAD
2178 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2179+
2180+#ifdef CONFIG_PAX_PAGEEXEC
2181+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2182+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2183+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2184+#else
2185+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2186+# define PAGE_COPY_NOEXEC PAGE_COPY
2187+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2188+#endif
2189+
2190 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2191 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2192 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2193diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2194index 2120746..8d70a5e 100644
2195--- a/arch/parisc/kernel/module.c
2196+++ b/arch/parisc/kernel/module.c
2197@@ -95,16 +95,38 @@
2198
2199 /* three functions to determine where in the module core
2200 * or init pieces the location is */
2201+static inline int in_init_rx(struct module *me, void *loc)
2202+{
2203+ return (loc >= me->module_init_rx &&
2204+ loc < (me->module_init_rx + me->init_size_rx));
2205+}
2206+
2207+static inline int in_init_rw(struct module *me, void *loc)
2208+{
2209+ return (loc >= me->module_init_rw &&
2210+ loc < (me->module_init_rw + me->init_size_rw));
2211+}
2212+
2213 static inline int in_init(struct module *me, void *loc)
2214 {
2215- return (loc >= me->module_init &&
2216- loc <= (me->module_init + me->init_size));
2217+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2218+}
2219+
2220+static inline int in_core_rx(struct module *me, void *loc)
2221+{
2222+ return (loc >= me->module_core_rx &&
2223+ loc < (me->module_core_rx + me->core_size_rx));
2224+}
2225+
2226+static inline int in_core_rw(struct module *me, void *loc)
2227+{
2228+ return (loc >= me->module_core_rw &&
2229+ loc < (me->module_core_rw + me->core_size_rw));
2230 }
2231
2232 static inline int in_core(struct module *me, void *loc)
2233 {
2234- return (loc >= me->module_core &&
2235- loc <= (me->module_core + me->core_size));
2236+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2237 }
2238
2239 static inline int in_local(struct module *me, void *loc)
2240@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2241 }
2242
2243 /* align things a bit */
2244- me->core_size = ALIGN(me->core_size, 16);
2245- me->arch.got_offset = me->core_size;
2246- me->core_size += gots * sizeof(struct got_entry);
2247+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2248+ me->arch.got_offset = me->core_size_rw;
2249+ me->core_size_rw += gots * sizeof(struct got_entry);
2250
2251- me->core_size = ALIGN(me->core_size, 16);
2252- me->arch.fdesc_offset = me->core_size;
2253- me->core_size += fdescs * sizeof(Elf_Fdesc);
2254+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2255+ me->arch.fdesc_offset = me->core_size_rw;
2256+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2257
2258 me->arch.got_max = gots;
2259 me->arch.fdesc_max = fdescs;
2260@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2261
2262 BUG_ON(value == 0);
2263
2264- got = me->module_core + me->arch.got_offset;
2265+ got = me->module_core_rw + me->arch.got_offset;
2266 for (i = 0; got[i].addr; i++)
2267 if (got[i].addr == value)
2268 goto out;
2269@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2270 #ifdef CONFIG_64BIT
2271 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2272 {
2273- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2274+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2275
2276 if (!value) {
2277 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2278@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2279
2280 /* Create new one */
2281 fdesc->addr = value;
2282- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2283+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2284 return (Elf_Addr)fdesc;
2285 }
2286 #endif /* CONFIG_64BIT */
2287@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2288
2289 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2290 end = table + sechdrs[me->arch.unwind_section].sh_size;
2291- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2292+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2293
2294 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2295 me->arch.unwind_section, table, end, gp);
2296diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2297index 9147391..f3d949a 100644
2298--- a/arch/parisc/kernel/sys_parisc.c
2299+++ b/arch/parisc/kernel/sys_parisc.c
2300@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2301 /* At this point: (!vma || addr < vma->vm_end). */
2302 if (TASK_SIZE - len < addr)
2303 return -ENOMEM;
2304- if (!vma || addr + len <= vma->vm_start)
2305+ if (check_heap_stack_gap(vma, addr, len))
2306 return addr;
2307 addr = vma->vm_end;
2308 }
2309@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2310 /* At this point: (!vma || addr < vma->vm_end). */
2311 if (TASK_SIZE - len < addr)
2312 return -ENOMEM;
2313- if (!vma || addr + len <= vma->vm_start)
2314+ if (check_heap_stack_gap(vma, addr, len))
2315 return addr;
2316 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2317 if (addr < vma->vm_end) /* handle wraparound */
2318@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2319 if (flags & MAP_FIXED)
2320 return addr;
2321 if (!addr)
2322- addr = TASK_UNMAPPED_BASE;
2323+ addr = current->mm->mmap_base;
2324
2325 if (filp) {
2326 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2327diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2328index 8b58bf0..7afff03 100644
2329--- a/arch/parisc/kernel/traps.c
2330+++ b/arch/parisc/kernel/traps.c
2331@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2332
2333 down_read(&current->mm->mmap_sem);
2334 vma = find_vma(current->mm,regs->iaoq[0]);
2335- if (vma && (regs->iaoq[0] >= vma->vm_start)
2336- && (vma->vm_flags & VM_EXEC)) {
2337-
2338+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2339 fault_address = regs->iaoq[0];
2340 fault_space = regs->iasq[0];
2341
2342diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2343index c6afbfc..c5839f6 100644
2344--- a/arch/parisc/mm/fault.c
2345+++ b/arch/parisc/mm/fault.c
2346@@ -15,6 +15,7 @@
2347 #include <linux/sched.h>
2348 #include <linux/interrupt.h>
2349 #include <linux/module.h>
2350+#include <linux/unistd.h>
2351
2352 #include <asm/uaccess.h>
2353 #include <asm/traps.h>
2354@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2355 static unsigned long
2356 parisc_acctyp(unsigned long code, unsigned int inst)
2357 {
2358- if (code == 6 || code == 16)
2359+ if (code == 6 || code == 7 || code == 16)
2360 return VM_EXEC;
2361
2362 switch (inst & 0xf0000000) {
2363@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2364 }
2365 #endif
2366
2367+#ifdef CONFIG_PAX_PAGEEXEC
2368+/*
2369+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2370+ *
2371+ * returns 1 when task should be killed
2372+ * 2 when rt_sigreturn trampoline was detected
2373+ * 3 when unpatched PLT trampoline was detected
2374+ */
2375+static int pax_handle_fetch_fault(struct pt_regs *regs)
2376+{
2377+
2378+#ifdef CONFIG_PAX_EMUPLT
2379+ int err;
2380+
2381+ do { /* PaX: unpatched PLT emulation */
2382+ unsigned int bl, depwi;
2383+
2384+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2385+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2386+
2387+ if (err)
2388+ break;
2389+
2390+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2391+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2392+
2393+ err = get_user(ldw, (unsigned int *)addr);
2394+ err |= get_user(bv, (unsigned int *)(addr+4));
2395+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2396+
2397+ if (err)
2398+ break;
2399+
2400+ if (ldw == 0x0E801096U &&
2401+ bv == 0xEAC0C000U &&
2402+ ldw2 == 0x0E881095U)
2403+ {
2404+ unsigned int resolver, map;
2405+
2406+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2407+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2408+ if (err)
2409+ break;
2410+
2411+ regs->gr[20] = instruction_pointer(regs)+8;
2412+ regs->gr[21] = map;
2413+ regs->gr[22] = resolver;
2414+ regs->iaoq[0] = resolver | 3UL;
2415+ regs->iaoq[1] = regs->iaoq[0] + 4;
2416+ return 3;
2417+ }
2418+ }
2419+ } while (0);
2420+#endif
2421+
2422+#ifdef CONFIG_PAX_EMUTRAMP
2423+
2424+#ifndef CONFIG_PAX_EMUSIGRT
2425+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2426+ return 1;
2427+#endif
2428+
2429+ do { /* PaX: rt_sigreturn emulation */
2430+ unsigned int ldi1, ldi2, bel, nop;
2431+
2432+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2433+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2434+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2435+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2436+
2437+ if (err)
2438+ break;
2439+
2440+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2441+ ldi2 == 0x3414015AU &&
2442+ bel == 0xE4008200U &&
2443+ nop == 0x08000240U)
2444+ {
2445+ regs->gr[25] = (ldi1 & 2) >> 1;
2446+ regs->gr[20] = __NR_rt_sigreturn;
2447+ regs->gr[31] = regs->iaoq[1] + 16;
2448+ regs->sr[0] = regs->iasq[1];
2449+ regs->iaoq[0] = 0x100UL;
2450+ regs->iaoq[1] = regs->iaoq[0] + 4;
2451+ regs->iasq[0] = regs->sr[2];
2452+ regs->iasq[1] = regs->sr[2];
2453+ return 2;
2454+ }
2455+ } while (0);
2456+#endif
2457+
2458+ return 1;
2459+}
2460+
2461+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2462+{
2463+ unsigned long i;
2464+
2465+ printk(KERN_ERR "PAX: bytes at PC: ");
2466+ for (i = 0; i < 5; i++) {
2467+ unsigned int c;
2468+ if (get_user(c, (unsigned int *)pc+i))
2469+ printk(KERN_CONT "???????? ");
2470+ else
2471+ printk(KERN_CONT "%08x ", c);
2472+ }
2473+ printk("\n");
2474+}
2475+#endif
2476+
2477 int fixup_exception(struct pt_regs *regs)
2478 {
2479 const struct exception_table_entry *fix;
2480@@ -192,8 +303,33 @@ good_area:
2481
2482 acc_type = parisc_acctyp(code,regs->iir);
2483
2484- if ((vma->vm_flags & acc_type) != acc_type)
2485+ if ((vma->vm_flags & acc_type) != acc_type) {
2486+
2487+#ifdef CONFIG_PAX_PAGEEXEC
2488+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2489+ (address & ~3UL) == instruction_pointer(regs))
2490+ {
2491+ up_read(&mm->mmap_sem);
2492+ switch (pax_handle_fetch_fault(regs)) {
2493+
2494+#ifdef CONFIG_PAX_EMUPLT
2495+ case 3:
2496+ return;
2497+#endif
2498+
2499+#ifdef CONFIG_PAX_EMUTRAMP
2500+ case 2:
2501+ return;
2502+#endif
2503+
2504+ }
2505+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2506+ do_group_exit(SIGKILL);
2507+ }
2508+#endif
2509+
2510 goto bad_area;
2511+ }
2512
2513 /*
2514 * If for any reason at all we couldn't handle the fault, make
2515diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2516index c107b74..409dc0f 100644
2517--- a/arch/powerpc/Makefile
2518+++ b/arch/powerpc/Makefile
2519@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2520 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2521 CPP = $(CC) -E $(KBUILD_CFLAGS)
2522
2523+cflags-y += -Wno-sign-compare -Wno-extra
2524+
2525 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2526
2527 ifeq ($(CONFIG_PPC64),y)
2528diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2529index 6d94d27..50d4cad 100644
2530--- a/arch/powerpc/include/asm/device.h
2531+++ b/arch/powerpc/include/asm/device.h
2532@@ -14,7 +14,7 @@ struct dev_archdata {
2533 struct device_node *of_node;
2534
2535 /* DMA operations on that device */
2536- struct dma_map_ops *dma_ops;
2537+ const struct dma_map_ops *dma_ops;
2538
2539 /*
2540 * When an iommu is in use, dma_data is used as a ptr to the base of the
2541diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2542index e281dae..2b8a784 100644
2543--- a/arch/powerpc/include/asm/dma-mapping.h
2544+++ b/arch/powerpc/include/asm/dma-mapping.h
2545@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2546 #ifdef CONFIG_PPC64
2547 extern struct dma_map_ops dma_iommu_ops;
2548 #endif
2549-extern struct dma_map_ops dma_direct_ops;
2550+extern const struct dma_map_ops dma_direct_ops;
2551
2552-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2553+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2554 {
2555 /* We don't handle the NULL dev case for ISA for now. We could
2556 * do it via an out of line call but it is not needed for now. The
2557@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2558 return dev->archdata.dma_ops;
2559 }
2560
2561-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2562+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2563 {
2564 dev->archdata.dma_ops = ops;
2565 }
2566@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2567
2568 static inline int dma_supported(struct device *dev, u64 mask)
2569 {
2570- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2571+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2572
2573 if (unlikely(dma_ops == NULL))
2574 return 0;
2575@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2576
2577 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2578 {
2579- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2580+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2581
2582 if (unlikely(dma_ops == NULL))
2583 return -EIO;
2584@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2585 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2586 dma_addr_t *dma_handle, gfp_t flag)
2587 {
2588- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2589+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2590 void *cpu_addr;
2591
2592 BUG_ON(!dma_ops);
2593@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2594 static inline void dma_free_coherent(struct device *dev, size_t size,
2595 void *cpu_addr, dma_addr_t dma_handle)
2596 {
2597- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2598+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2599
2600 BUG_ON(!dma_ops);
2601
2602@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2603
2604 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2605 {
2606- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2607+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2608
2609 if (dma_ops->mapping_error)
2610 return dma_ops->mapping_error(dev, dma_addr);
2611diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2612index 5698502..5db093c 100644
2613--- a/arch/powerpc/include/asm/elf.h
2614+++ b/arch/powerpc/include/asm/elf.h
2615@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2616 the loader. We need to make sure that it is out of the way of the program
2617 that it will "exec", and that there is sufficient room for the brk. */
2618
2619-extern unsigned long randomize_et_dyn(unsigned long base);
2620-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2621+#define ELF_ET_DYN_BASE (0x20000000)
2622+
2623+#ifdef CONFIG_PAX_ASLR
2624+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2625+
2626+#ifdef __powerpc64__
2627+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2628+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2629+#else
2630+#define PAX_DELTA_MMAP_LEN 15
2631+#define PAX_DELTA_STACK_LEN 15
2632+#endif
2633+#endif
2634
2635 /*
2636 * Our registers are always unsigned longs, whether we're a 32 bit
2637@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2638 (0x7ff >> (PAGE_SHIFT - 12)) : \
2639 (0x3ffff >> (PAGE_SHIFT - 12)))
2640
2641-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2642-#define arch_randomize_brk arch_randomize_brk
2643-
2644 #endif /* __KERNEL__ */
2645
2646 /*
2647diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2648index edfc980..1766f59 100644
2649--- a/arch/powerpc/include/asm/iommu.h
2650+++ b/arch/powerpc/include/asm/iommu.h
2651@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2652 extern void iommu_init_early_dart(void);
2653 extern void iommu_init_early_pasemi(void);
2654
2655+/* dma-iommu.c */
2656+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2657+
2658 #ifdef CONFIG_PCI
2659 extern void pci_iommu_init(void);
2660 extern void pci_direct_iommu_init(void);
2661diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2662index 9163695..5a00112 100644
2663--- a/arch/powerpc/include/asm/kmap_types.h
2664+++ b/arch/powerpc/include/asm/kmap_types.h
2665@@ -26,6 +26,7 @@ enum km_type {
2666 KM_SOFTIRQ1,
2667 KM_PPC_SYNC_PAGE,
2668 KM_PPC_SYNC_ICACHE,
2669+ KM_CLEARPAGE,
2670 KM_TYPE_NR
2671 };
2672
2673diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2674index ff24254..fe45b21 100644
2675--- a/arch/powerpc/include/asm/page.h
2676+++ b/arch/powerpc/include/asm/page.h
2677@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2678 * and needs to be executable. This means the whole heap ends
2679 * up being executable.
2680 */
2681-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2682- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2683+#define VM_DATA_DEFAULT_FLAGS32 \
2684+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2685+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2686
2687 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2688 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2689@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2690 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2691 #endif
2692
2693+#define ktla_ktva(addr) (addr)
2694+#define ktva_ktla(addr) (addr)
2695+
2696 #ifndef __ASSEMBLY__
2697
2698 #undef STRICT_MM_TYPECHECKS
2699diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2700index 3f17b83..1f9e766 100644
2701--- a/arch/powerpc/include/asm/page_64.h
2702+++ b/arch/powerpc/include/asm/page_64.h
2703@@ -180,15 +180,18 @@ do { \
2704 * stack by default, so in the absense of a PT_GNU_STACK program header
2705 * we turn execute permission off.
2706 */
2707-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2708- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2709+#define VM_STACK_DEFAULT_FLAGS32 \
2710+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2711+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2712
2713 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2714 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2715
2716+#ifndef CONFIG_PAX_PAGEEXEC
2717 #define VM_STACK_DEFAULT_FLAGS \
2718 (test_thread_flag(TIF_32BIT) ? \
2719 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2720+#endif
2721
2722 #include <asm-generic/getorder.h>
2723
2724diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2725index b5ea626..4030822 100644
2726--- a/arch/powerpc/include/asm/pci.h
2727+++ b/arch/powerpc/include/asm/pci.h
2728@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2729 }
2730
2731 #ifdef CONFIG_PCI
2732-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2733-extern struct dma_map_ops *get_pci_dma_ops(void);
2734+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2735+extern const struct dma_map_ops *get_pci_dma_ops(void);
2736 #else /* CONFIG_PCI */
2737 #define set_pci_dma_ops(d)
2738 #define get_pci_dma_ops() NULL
2739diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2740index 2a5da06..d65bea2 100644
2741--- a/arch/powerpc/include/asm/pgtable.h
2742+++ b/arch/powerpc/include/asm/pgtable.h
2743@@ -2,6 +2,7 @@
2744 #define _ASM_POWERPC_PGTABLE_H
2745 #ifdef __KERNEL__
2746
2747+#include <linux/const.h>
2748 #ifndef __ASSEMBLY__
2749 #include <asm/processor.h> /* For TASK_SIZE */
2750 #include <asm/mmu.h>
2751diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2752index 4aad413..85d86bf 100644
2753--- a/arch/powerpc/include/asm/pte-hash32.h
2754+++ b/arch/powerpc/include/asm/pte-hash32.h
2755@@ -21,6 +21,7 @@
2756 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2757 #define _PAGE_USER 0x004 /* usermode access allowed */
2758 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2759+#define _PAGE_EXEC _PAGE_GUARDED
2760 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2761 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2762 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2763diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2764index 8c34149..78f425a 100644
2765--- a/arch/powerpc/include/asm/ptrace.h
2766+++ b/arch/powerpc/include/asm/ptrace.h
2767@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2768 } while(0)
2769
2770 struct task_struct;
2771-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2772+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2773 extern int ptrace_put_reg(struct task_struct *task, int regno,
2774 unsigned long data);
2775
2776diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2777index 32a7c30..be3a8bb 100644
2778--- a/arch/powerpc/include/asm/reg.h
2779+++ b/arch/powerpc/include/asm/reg.h
2780@@ -191,6 +191,7 @@
2781 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2782 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2783 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2784+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2785 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2786 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2787 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2788diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2789index 8979d4c..d2fd0d3 100644
2790--- a/arch/powerpc/include/asm/swiotlb.h
2791+++ b/arch/powerpc/include/asm/swiotlb.h
2792@@ -13,7 +13,7 @@
2793
2794 #include <linux/swiotlb.h>
2795
2796-extern struct dma_map_ops swiotlb_dma_ops;
2797+extern const struct dma_map_ops swiotlb_dma_ops;
2798
2799 static inline void dma_mark_clean(void *addr, size_t size) {}
2800
2801diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2802index 094a12a..877a60a 100644
2803--- a/arch/powerpc/include/asm/system.h
2804+++ b/arch/powerpc/include/asm/system.h
2805@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2806 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2807 #endif
2808
2809-extern unsigned long arch_align_stack(unsigned long sp);
2810+#define arch_align_stack(x) ((x) & ~0xfUL)
2811
2812 /* Used in very early kernel initialization. */
2813 extern unsigned long reloc_offset(void);
2814diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2815index bd0fb84..a42a14b 100644
2816--- a/arch/powerpc/include/asm/uaccess.h
2817+++ b/arch/powerpc/include/asm/uaccess.h
2818@@ -13,6 +13,8 @@
2819 #define VERIFY_READ 0
2820 #define VERIFY_WRITE 1
2821
2822+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2823+
2824 /*
2825 * The fs value determines whether argument validity checking should be
2826 * performed or not. If get_fs() == USER_DS, checking is performed, with
2827@@ -327,52 +329,6 @@ do { \
2828 extern unsigned long __copy_tofrom_user(void __user *to,
2829 const void __user *from, unsigned long size);
2830
2831-#ifndef __powerpc64__
2832-
2833-static inline unsigned long copy_from_user(void *to,
2834- const void __user *from, unsigned long n)
2835-{
2836- unsigned long over;
2837-
2838- if (access_ok(VERIFY_READ, from, n))
2839- return __copy_tofrom_user((__force void __user *)to, from, n);
2840- if ((unsigned long)from < TASK_SIZE) {
2841- over = (unsigned long)from + n - TASK_SIZE;
2842- return __copy_tofrom_user((__force void __user *)to, from,
2843- n - over) + over;
2844- }
2845- return n;
2846-}
2847-
2848-static inline unsigned long copy_to_user(void __user *to,
2849- const void *from, unsigned long n)
2850-{
2851- unsigned long over;
2852-
2853- if (access_ok(VERIFY_WRITE, to, n))
2854- return __copy_tofrom_user(to, (__force void __user *)from, n);
2855- if ((unsigned long)to < TASK_SIZE) {
2856- over = (unsigned long)to + n - TASK_SIZE;
2857- return __copy_tofrom_user(to, (__force void __user *)from,
2858- n - over) + over;
2859- }
2860- return n;
2861-}
2862-
2863-#else /* __powerpc64__ */
2864-
2865-#define __copy_in_user(to, from, size) \
2866- __copy_tofrom_user((to), (from), (size))
2867-
2868-extern unsigned long copy_from_user(void *to, const void __user *from,
2869- unsigned long n);
2870-extern unsigned long copy_to_user(void __user *to, const void *from,
2871- unsigned long n);
2872-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2873- unsigned long n);
2874-
2875-#endif /* __powerpc64__ */
2876-
2877 static inline unsigned long __copy_from_user_inatomic(void *to,
2878 const void __user *from, unsigned long n)
2879 {
2880@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2881 if (ret == 0)
2882 return 0;
2883 }
2884+
2885+ if (!__builtin_constant_p(n))
2886+ check_object_size(to, n, false);
2887+
2888 return __copy_tofrom_user((__force void __user *)to, from, n);
2889 }
2890
2891@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2892 if (ret == 0)
2893 return 0;
2894 }
2895+
2896+ if (!__builtin_constant_p(n))
2897+ check_object_size(from, n, true);
2898+
2899 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2900 }
2901
2902@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2903 return __copy_to_user_inatomic(to, from, size);
2904 }
2905
2906+#ifndef __powerpc64__
2907+
2908+static inline unsigned long __must_check copy_from_user(void *to,
2909+ const void __user *from, unsigned long n)
2910+{
2911+ unsigned long over;
2912+
2913+ if ((long)n < 0)
2914+ return n;
2915+
2916+ if (access_ok(VERIFY_READ, from, n)) {
2917+ if (!__builtin_constant_p(n))
2918+ check_object_size(to, n, false);
2919+ return __copy_tofrom_user((__force void __user *)to, from, n);
2920+ }
2921+ if ((unsigned long)from < TASK_SIZE) {
2922+ over = (unsigned long)from + n - TASK_SIZE;
2923+ if (!__builtin_constant_p(n - over))
2924+ check_object_size(to, n - over, false);
2925+ return __copy_tofrom_user((__force void __user *)to, from,
2926+ n - over) + over;
2927+ }
2928+ return n;
2929+}
2930+
2931+static inline unsigned long __must_check copy_to_user(void __user *to,
2932+ const void *from, unsigned long n)
2933+{
2934+ unsigned long over;
2935+
2936+ if ((long)n < 0)
2937+ return n;
2938+
2939+ if (access_ok(VERIFY_WRITE, to, n)) {
2940+ if (!__builtin_constant_p(n))
2941+ check_object_size(from, n, true);
2942+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2943+ }
2944+ if ((unsigned long)to < TASK_SIZE) {
2945+ over = (unsigned long)to + n - TASK_SIZE;
2946+ if (!__builtin_constant_p(n))
2947+ check_object_size(from, n - over, true);
2948+ return __copy_tofrom_user(to, (__force void __user *)from,
2949+ n - over) + over;
2950+ }
2951+ return n;
2952+}
2953+
2954+#else /* __powerpc64__ */
2955+
2956+#define __copy_in_user(to, from, size) \
2957+ __copy_tofrom_user((to), (from), (size))
2958+
2959+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2960+{
2961+ if ((long)n < 0 || n > INT_MAX)
2962+ return n;
2963+
2964+ if (!__builtin_constant_p(n))
2965+ check_object_size(to, n, false);
2966+
2967+ if (likely(access_ok(VERIFY_READ, from, n)))
2968+ n = __copy_from_user(to, from, n);
2969+ else
2970+ memset(to, 0, n);
2971+ return n;
2972+}
2973+
2974+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2975+{
2976+ if ((long)n < 0 || n > INT_MAX)
2977+ return n;
2978+
2979+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2980+ if (!__builtin_constant_p(n))
2981+ check_object_size(from, n, true);
2982+ n = __copy_to_user(to, from, n);
2983+ }
2984+ return n;
2985+}
2986+
2987+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2988+ unsigned long n);
2989+
2990+#endif /* __powerpc64__ */
2991+
2992 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2993
2994 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2995diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
2996index bb37b1d..01fe9ce 100644
2997--- a/arch/powerpc/kernel/cacheinfo.c
2998+++ b/arch/powerpc/kernel/cacheinfo.c
2999@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3000 &cache_assoc_attr,
3001 };
3002
3003-static struct sysfs_ops cache_index_ops = {
3004+static const struct sysfs_ops cache_index_ops = {
3005 .show = cache_index_show,
3006 };
3007
3008diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3009index 37771a5..648530c 100644
3010--- a/arch/powerpc/kernel/dma-iommu.c
3011+++ b/arch/powerpc/kernel/dma-iommu.c
3012@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3013 }
3014
3015 /* We support DMA to/from any memory page via the iommu */
3016-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3017+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3018 {
3019 struct iommu_table *tbl = get_iommu_table_base(dev);
3020
3021diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3022index e96cbbd..bdd6d41 100644
3023--- a/arch/powerpc/kernel/dma-swiotlb.c
3024+++ b/arch/powerpc/kernel/dma-swiotlb.c
3025@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3026 * map_page, and unmap_page on highmem, use normal dma_ops
3027 * for everything else.
3028 */
3029-struct dma_map_ops swiotlb_dma_ops = {
3030+const struct dma_map_ops swiotlb_dma_ops = {
3031 .alloc_coherent = dma_direct_alloc_coherent,
3032 .free_coherent = dma_direct_free_coherent,
3033 .map_sg = swiotlb_map_sg_attrs,
3034diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3035index 6215062..ebea59c 100644
3036--- a/arch/powerpc/kernel/dma.c
3037+++ b/arch/powerpc/kernel/dma.c
3038@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3039 }
3040 #endif
3041
3042-struct dma_map_ops dma_direct_ops = {
3043+const struct dma_map_ops dma_direct_ops = {
3044 .alloc_coherent = dma_direct_alloc_coherent,
3045 .free_coherent = dma_direct_free_coherent,
3046 .map_sg = dma_direct_map_sg,
3047diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3048index 24dcc0e..a300455 100644
3049--- a/arch/powerpc/kernel/exceptions-64e.S
3050+++ b/arch/powerpc/kernel/exceptions-64e.S
3051@@ -455,6 +455,7 @@ storage_fault_common:
3052 std r14,_DAR(r1)
3053 std r15,_DSISR(r1)
3054 addi r3,r1,STACK_FRAME_OVERHEAD
3055+ bl .save_nvgprs
3056 mr r4,r14
3057 mr r5,r15
3058 ld r14,PACA_EXGEN+EX_R14(r13)
3059@@ -464,8 +465,7 @@ storage_fault_common:
3060 cmpdi r3,0
3061 bne- 1f
3062 b .ret_from_except_lite
3063-1: bl .save_nvgprs
3064- mr r5,r3
3065+1: mr r5,r3
3066 addi r3,r1,STACK_FRAME_OVERHEAD
3067 ld r4,_DAR(r1)
3068 bl .bad_page_fault
3069diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3070index 1808876..9fd206a 100644
3071--- a/arch/powerpc/kernel/exceptions-64s.S
3072+++ b/arch/powerpc/kernel/exceptions-64s.S
3073@@ -818,10 +818,10 @@ handle_page_fault:
3074 11: ld r4,_DAR(r1)
3075 ld r5,_DSISR(r1)
3076 addi r3,r1,STACK_FRAME_OVERHEAD
3077+ bl .save_nvgprs
3078 bl .do_page_fault
3079 cmpdi r3,0
3080 beq+ 13f
3081- bl .save_nvgprs
3082 mr r5,r3
3083 addi r3,r1,STACK_FRAME_OVERHEAD
3084 lwz r4,_DAR(r1)
3085diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3086index a4c8b38..1b09ad9 100644
3087--- a/arch/powerpc/kernel/ibmebus.c
3088+++ b/arch/powerpc/kernel/ibmebus.c
3089@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3090 return 1;
3091 }
3092
3093-static struct dma_map_ops ibmebus_dma_ops = {
3094+static const struct dma_map_ops ibmebus_dma_ops = {
3095 .alloc_coherent = ibmebus_alloc_coherent,
3096 .free_coherent = ibmebus_free_coherent,
3097 .map_sg = ibmebus_map_sg,
3098diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3099index 641c74b..8339ad7 100644
3100--- a/arch/powerpc/kernel/kgdb.c
3101+++ b/arch/powerpc/kernel/kgdb.c
3102@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3103 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3104 return 0;
3105
3106- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3107+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3108 regs->nip += 4;
3109
3110 return 1;
3111@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3112 /*
3113 * Global data
3114 */
3115-struct kgdb_arch arch_kgdb_ops = {
3116+const struct kgdb_arch arch_kgdb_ops = {
3117 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3118 };
3119
3120diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3121index 477c663..4f50234 100644
3122--- a/arch/powerpc/kernel/module.c
3123+++ b/arch/powerpc/kernel/module.c
3124@@ -31,11 +31,24 @@
3125
3126 LIST_HEAD(module_bug_list);
3127
3128+#ifdef CONFIG_PAX_KERNEXEC
3129 void *module_alloc(unsigned long size)
3130 {
3131 if (size == 0)
3132 return NULL;
3133
3134+ return vmalloc(size);
3135+}
3136+
3137+void *module_alloc_exec(unsigned long size)
3138+#else
3139+void *module_alloc(unsigned long size)
3140+#endif
3141+
3142+{
3143+ if (size == 0)
3144+ return NULL;
3145+
3146 return vmalloc_exec(size);
3147 }
3148
3149@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3150 vfree(module_region);
3151 }
3152
3153+#ifdef CONFIG_PAX_KERNEXEC
3154+void module_free_exec(struct module *mod, void *module_region)
3155+{
3156+ module_free(mod, module_region);
3157+}
3158+#endif
3159+
3160 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3161 const Elf_Shdr *sechdrs,
3162 const char *name)
3163diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3164index f832773..0507238 100644
3165--- a/arch/powerpc/kernel/module_32.c
3166+++ b/arch/powerpc/kernel/module_32.c
3167@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3168 me->arch.core_plt_section = i;
3169 }
3170 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3171- printk("Module doesn't contain .plt or .init.plt sections.\n");
3172+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3173 return -ENOEXEC;
3174 }
3175
3176@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3177
3178 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3179 /* Init, or core PLT? */
3180- if (location >= mod->module_core
3181- && location < mod->module_core + mod->core_size)
3182+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3183+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3184 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3185- else
3186+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3187+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3188 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3189+ else {
3190+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3191+ return ~0UL;
3192+ }
3193
3194 /* Find this entry, or if that fails, the next avail. entry */
3195 while (entry->jump[0]) {
3196diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3197index cadbed6..b9bbb00 100644
3198--- a/arch/powerpc/kernel/pci-common.c
3199+++ b/arch/powerpc/kernel/pci-common.c
3200@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3201 unsigned int ppc_pci_flags = 0;
3202
3203
3204-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3205+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3206
3207-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3208+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3209 {
3210 pci_dma_ops = dma_ops;
3211 }
3212
3213-struct dma_map_ops *get_pci_dma_ops(void)
3214+const struct dma_map_ops *get_pci_dma_ops(void)
3215 {
3216 return pci_dma_ops;
3217 }
3218diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3219index 7b816da..8d5c277 100644
3220--- a/arch/powerpc/kernel/process.c
3221+++ b/arch/powerpc/kernel/process.c
3222@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3223 * Lookup NIP late so we have the best change of getting the
3224 * above info out without failing
3225 */
3226- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3227- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3228+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3229+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3230 #endif
3231 show_stack(current, (unsigned long *) regs->gpr[1]);
3232 if (!user_mode(regs))
3233@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3234 newsp = stack[0];
3235 ip = stack[STACK_FRAME_LR_SAVE];
3236 if (!firstframe || ip != lr) {
3237- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3238+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3239 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3240 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3241- printk(" (%pS)",
3242+ printk(" (%pA)",
3243 (void *)current->ret_stack[curr_frame].ret);
3244 curr_frame--;
3245 }
3246@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3247 struct pt_regs *regs = (struct pt_regs *)
3248 (sp + STACK_FRAME_OVERHEAD);
3249 lr = regs->link;
3250- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3251+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3252 regs->trap, (void *)regs->nip, (void *)lr);
3253 firstframe = 1;
3254 }
3255@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3256 }
3257
3258 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3259-
3260-unsigned long arch_align_stack(unsigned long sp)
3261-{
3262- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3263- sp -= get_random_int() & ~PAGE_MASK;
3264- return sp & ~0xf;
3265-}
3266-
3267-static inline unsigned long brk_rnd(void)
3268-{
3269- unsigned long rnd = 0;
3270-
3271- /* 8MB for 32bit, 1GB for 64bit */
3272- if (is_32bit_task())
3273- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3274- else
3275- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3276-
3277- return rnd << PAGE_SHIFT;
3278-}
3279-
3280-unsigned long arch_randomize_brk(struct mm_struct *mm)
3281-{
3282- unsigned long base = mm->brk;
3283- unsigned long ret;
3284-
3285-#ifdef CONFIG_PPC_STD_MMU_64
3286- /*
3287- * If we are using 1TB segments and we are allowed to randomise
3288- * the heap, we can put it above 1TB so it is backed by a 1TB
3289- * segment. Otherwise the heap will be in the bottom 1TB
3290- * which always uses 256MB segments and this may result in a
3291- * performance penalty.
3292- */
3293- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3294- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3295-#endif
3296-
3297- ret = PAGE_ALIGN(base + brk_rnd());
3298-
3299- if (ret < mm->brk)
3300- return mm->brk;
3301-
3302- return ret;
3303-}
3304-
3305-unsigned long randomize_et_dyn(unsigned long base)
3306-{
3307- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3308-
3309- if (ret < base)
3310- return base;
3311-
3312- return ret;
3313-}
3314diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3315index ef14988..856c4bc 100644
3316--- a/arch/powerpc/kernel/ptrace.c
3317+++ b/arch/powerpc/kernel/ptrace.c
3318@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3319 /*
3320 * Get contents of register REGNO in task TASK.
3321 */
3322-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3323+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3324 {
3325 if (task->thread.regs == NULL)
3326 return -EIO;
3327@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3328
3329 CHECK_FULL_REGS(child->thread.regs);
3330 if (index < PT_FPR0) {
3331- tmp = ptrace_get_reg(child, (int) index);
3332+ tmp = ptrace_get_reg(child, index);
3333 } else {
3334 flush_fp_to_thread(child);
3335 tmp = ((unsigned long *)child->thread.fpr)
3336diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3337index d670429..2bc59b2 100644
3338--- a/arch/powerpc/kernel/signal_32.c
3339+++ b/arch/powerpc/kernel/signal_32.c
3340@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3341 /* Save user registers on the stack */
3342 frame = &rt_sf->uc.uc_mcontext;
3343 addr = frame;
3344- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3345+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3346 if (save_user_regs(regs, frame, 0, 1))
3347 goto badframe;
3348 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3349diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3350index 2fe6fc6..ada0d96 100644
3351--- a/arch/powerpc/kernel/signal_64.c
3352+++ b/arch/powerpc/kernel/signal_64.c
3353@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3354 current->thread.fpscr.val = 0;
3355
3356 /* Set up to return from userspace. */
3357- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3358+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3359 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3360 } else {
3361 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3362diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3363index b97c2d6..dd01a6a 100644
3364--- a/arch/powerpc/kernel/sys_ppc32.c
3365+++ b/arch/powerpc/kernel/sys_ppc32.c
3366@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3367 if (oldlenp) {
3368 if (!error) {
3369 if (get_user(oldlen, oldlenp) ||
3370- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3371+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3372+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3373 error = -EFAULT;
3374 }
3375- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3376 }
3377 return error;
3378 }
3379diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3380index 6f0ae1a..e4b6a56 100644
3381--- a/arch/powerpc/kernel/traps.c
3382+++ b/arch/powerpc/kernel/traps.c
3383@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3384 static inline void pmac_backlight_unblank(void) { }
3385 #endif
3386
3387+extern void gr_handle_kernel_exploit(void);
3388+
3389 int die(const char *str, struct pt_regs *regs, long err)
3390 {
3391 static struct {
3392@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3393 if (panic_on_oops)
3394 panic("Fatal exception");
3395
3396+ gr_handle_kernel_exploit();
3397+
3398 oops_exit();
3399 do_exit(err);
3400
3401diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3402index 137dc22..fe57a79 100644
3403--- a/arch/powerpc/kernel/vdso.c
3404+++ b/arch/powerpc/kernel/vdso.c
3405@@ -36,6 +36,7 @@
3406 #include <asm/firmware.h>
3407 #include <asm/vdso.h>
3408 #include <asm/vdso_datapage.h>
3409+#include <asm/mman.h>
3410
3411 #include "setup.h"
3412
3413@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3414 vdso_base = VDSO32_MBASE;
3415 #endif
3416
3417- current->mm->context.vdso_base = 0;
3418+ current->mm->context.vdso_base = ~0UL;
3419
3420 /* vDSO has a problem and was disabled, just don't "enable" it for the
3421 * process
3422@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3423 vdso_base = get_unmapped_area(NULL, vdso_base,
3424 (vdso_pages << PAGE_SHIFT) +
3425 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3426- 0, 0);
3427+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3428 if (IS_ERR_VALUE(vdso_base)) {
3429 rc = vdso_base;
3430 goto fail_mmapsem;
3431diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3432index 77f6421..829564a 100644
3433--- a/arch/powerpc/kernel/vio.c
3434+++ b/arch/powerpc/kernel/vio.c
3435@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3436 vio_cmo_dealloc(viodev, alloc_size);
3437 }
3438
3439-struct dma_map_ops vio_dma_mapping_ops = {
3440+static const struct dma_map_ops vio_dma_mapping_ops = {
3441 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3442 .free_coherent = vio_dma_iommu_free_coherent,
3443 .map_sg = vio_dma_iommu_map_sg,
3444 .unmap_sg = vio_dma_iommu_unmap_sg,
3445+ .dma_supported = dma_iommu_dma_supported,
3446 .map_page = vio_dma_iommu_map_page,
3447 .unmap_page = vio_dma_iommu_unmap_page,
3448
3449@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3450
3451 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3452 {
3453- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3454 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3455 }
3456
3457diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3458index 5eea6f3..5d10396 100644
3459--- a/arch/powerpc/lib/usercopy_64.c
3460+++ b/arch/powerpc/lib/usercopy_64.c
3461@@ -9,22 +9,6 @@
3462 #include <linux/module.h>
3463 #include <asm/uaccess.h>
3464
3465-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3466-{
3467- if (likely(access_ok(VERIFY_READ, from, n)))
3468- n = __copy_from_user(to, from, n);
3469- else
3470- memset(to, 0, n);
3471- return n;
3472-}
3473-
3474-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3475-{
3476- if (likely(access_ok(VERIFY_WRITE, to, n)))
3477- n = __copy_to_user(to, from, n);
3478- return n;
3479-}
3480-
3481 unsigned long copy_in_user(void __user *to, const void __user *from,
3482 unsigned long n)
3483 {
3484@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3485 return n;
3486 }
3487
3488-EXPORT_SYMBOL(copy_from_user);
3489-EXPORT_SYMBOL(copy_to_user);
3490 EXPORT_SYMBOL(copy_in_user);
3491
3492diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3493index e7dae82..877ce0d 100644
3494--- a/arch/powerpc/mm/fault.c
3495+++ b/arch/powerpc/mm/fault.c
3496@@ -30,6 +30,10 @@
3497 #include <linux/kprobes.h>
3498 #include <linux/kdebug.h>
3499 #include <linux/perf_event.h>
3500+#include <linux/slab.h>
3501+#include <linux/pagemap.h>
3502+#include <linux/compiler.h>
3503+#include <linux/unistd.h>
3504
3505 #include <asm/firmware.h>
3506 #include <asm/page.h>
3507@@ -40,6 +44,7 @@
3508 #include <asm/uaccess.h>
3509 #include <asm/tlbflush.h>
3510 #include <asm/siginfo.h>
3511+#include <asm/ptrace.h>
3512
3513
3514 #ifdef CONFIG_KPROBES
3515@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3516 }
3517 #endif
3518
3519+#ifdef CONFIG_PAX_PAGEEXEC
3520+/*
3521+ * PaX: decide what to do with offenders (regs->nip = fault address)
3522+ *
3523+ * returns 1 when task should be killed
3524+ */
3525+static int pax_handle_fetch_fault(struct pt_regs *regs)
3526+{
3527+ return 1;
3528+}
3529+
3530+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3531+{
3532+ unsigned long i;
3533+
3534+ printk(KERN_ERR "PAX: bytes at PC: ");
3535+ for (i = 0; i < 5; i++) {
3536+ unsigned int c;
3537+ if (get_user(c, (unsigned int __user *)pc+i))
3538+ printk(KERN_CONT "???????? ");
3539+ else
3540+ printk(KERN_CONT "%08x ", c);
3541+ }
3542+ printk("\n");
3543+}
3544+#endif
3545+
3546 /*
3547 * Check whether the instruction at regs->nip is a store using
3548 * an update addressing form which will update r1.
3549@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3550 * indicate errors in DSISR but can validly be set in SRR1.
3551 */
3552 if (trap == 0x400)
3553- error_code &= 0x48200000;
3554+ error_code &= 0x58200000;
3555 else
3556 is_write = error_code & DSISR_ISSTORE;
3557 #else
3558@@ -250,7 +282,7 @@ good_area:
3559 * "undefined". Of those that can be set, this is the only
3560 * one which seems bad.
3561 */
3562- if (error_code & 0x10000000)
3563+ if (error_code & DSISR_GUARDED)
3564 /* Guarded storage error. */
3565 goto bad_area;
3566 #endif /* CONFIG_8xx */
3567@@ -265,7 +297,7 @@ good_area:
3568 * processors use the same I/D cache coherency mechanism
3569 * as embedded.
3570 */
3571- if (error_code & DSISR_PROTFAULT)
3572+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3573 goto bad_area;
3574 #endif /* CONFIG_PPC_STD_MMU */
3575
3576@@ -335,6 +367,23 @@ bad_area:
3577 bad_area_nosemaphore:
3578 /* User mode accesses cause a SIGSEGV */
3579 if (user_mode(regs)) {
3580+
3581+#ifdef CONFIG_PAX_PAGEEXEC
3582+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3583+#ifdef CONFIG_PPC_STD_MMU
3584+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3585+#else
3586+ if (is_exec && regs->nip == address) {
3587+#endif
3588+ switch (pax_handle_fetch_fault(regs)) {
3589+ }
3590+
3591+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3592+ do_group_exit(SIGKILL);
3593+ }
3594+ }
3595+#endif
3596+
3597 _exception(SIGSEGV, regs, code, address);
3598 return 0;
3599 }
3600diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3601index 5973631..ad617af 100644
3602--- a/arch/powerpc/mm/mem.c
3603+++ b/arch/powerpc/mm/mem.c
3604@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3605 {
3606 unsigned long lmb_next_region_start_pfn,
3607 lmb_region_max_pfn;
3608- int i;
3609+ unsigned int i;
3610
3611 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3612 lmb_region_max_pfn =
3613diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3614index 0d957a4..26d968f 100644
3615--- a/arch/powerpc/mm/mmap_64.c
3616+++ b/arch/powerpc/mm/mmap_64.c
3617@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3618 */
3619 if (mmap_is_legacy()) {
3620 mm->mmap_base = TASK_UNMAPPED_BASE;
3621+
3622+#ifdef CONFIG_PAX_RANDMMAP
3623+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3624+ mm->mmap_base += mm->delta_mmap;
3625+#endif
3626+
3627 mm->get_unmapped_area = arch_get_unmapped_area;
3628 mm->unmap_area = arch_unmap_area;
3629 } else {
3630 mm->mmap_base = mmap_base();
3631+
3632+#ifdef CONFIG_PAX_RANDMMAP
3633+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3634+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3635+#endif
3636+
3637 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3638 mm->unmap_area = arch_unmap_area_topdown;
3639 }
3640diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3641index ba51948..23009d9 100644
3642--- a/arch/powerpc/mm/slice.c
3643+++ b/arch/powerpc/mm/slice.c
3644@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3645 if ((mm->task_size - len) < addr)
3646 return 0;
3647 vma = find_vma(mm, addr);
3648- return (!vma || (addr + len) <= vma->vm_start);
3649+ return check_heap_stack_gap(vma, addr, len);
3650 }
3651
3652 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3653@@ -256,7 +256,7 @@ full_search:
3654 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3655 continue;
3656 }
3657- if (!vma || addr + len <= vma->vm_start) {
3658+ if (check_heap_stack_gap(vma, addr, len)) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3663 }
3664 }
3665
3666- addr = mm->mmap_base;
3667- while (addr > len) {
3668+ if (mm->mmap_base < len)
3669+ addr = -ENOMEM;
3670+ else
3671+ addr = mm->mmap_base - len;
3672+
3673+ while (!IS_ERR_VALUE(addr)) {
3674 /* Go down by chunk size */
3675- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3676+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3677
3678 /* Check for hit with different page size */
3679 mask = slice_range_to_mask(addr, len);
3680@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3681 * return with success:
3682 */
3683 vma = find_vma(mm, addr);
3684- if (!vma || (addr + len) <= vma->vm_start) {
3685+ if (check_heap_stack_gap(vma, addr, len)) {
3686 /* remember the address as a hint for next time */
3687 if (use_cache)
3688 mm->free_area_cache = addr;
3689@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3690 mm->cached_hole_size = vma->vm_start - addr;
3691
3692 /* try just below the current vma->vm_start */
3693- addr = vma->vm_start;
3694+ addr = skip_heap_stack_gap(vma, len);
3695 }
3696
3697 /*
3698@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3699 if (fixed && addr > (mm->task_size - len))
3700 return -EINVAL;
3701
3702+#ifdef CONFIG_PAX_RANDMMAP
3703+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3704+ addr = 0;
3705+#endif
3706+
3707 /* If hint, make sure it matches our alignment restrictions */
3708 if (!fixed && addr) {
3709 addr = _ALIGN_UP(addr, 1ul << pshift);
3710diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3711index b5c753d..8f01abe 100644
3712--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3713+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3714@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3715 lite5200_pm_target_state = PM_SUSPEND_ON;
3716 }
3717
3718-static struct platform_suspend_ops lite5200_pm_ops = {
3719+static const struct platform_suspend_ops lite5200_pm_ops = {
3720 .valid = lite5200_pm_valid,
3721 .begin = lite5200_pm_begin,
3722 .prepare = lite5200_pm_prepare,
3723diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3724index a55b0b6..478c18e 100644
3725--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3726+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3727@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3728 iounmap(mbar);
3729 }
3730
3731-static struct platform_suspend_ops mpc52xx_pm_ops = {
3732+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3733 .valid = mpc52xx_pm_valid,
3734 .prepare = mpc52xx_pm_prepare,
3735 .enter = mpc52xx_pm_enter,
3736diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3737index 08e65fc..643d3ac 100644
3738--- a/arch/powerpc/platforms/83xx/suspend.c
3739+++ b/arch/powerpc/platforms/83xx/suspend.c
3740@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3741 return ret;
3742 }
3743
3744-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3745+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3746 .valid = mpc83xx_suspend_valid,
3747 .begin = mpc83xx_suspend_begin,
3748 .enter = mpc83xx_suspend_enter,
3749diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3750index ca5bfdf..1602e09 100644
3751--- a/arch/powerpc/platforms/cell/iommu.c
3752+++ b/arch/powerpc/platforms/cell/iommu.c
3753@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3754
3755 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3756
3757-struct dma_map_ops dma_iommu_fixed_ops = {
3758+const struct dma_map_ops dma_iommu_fixed_ops = {
3759 .alloc_coherent = dma_fixed_alloc_coherent,
3760 .free_coherent = dma_fixed_free_coherent,
3761 .map_sg = dma_fixed_map_sg,
3762diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3763index e34b305..20e48ec 100644
3764--- a/arch/powerpc/platforms/ps3/system-bus.c
3765+++ b/arch/powerpc/platforms/ps3/system-bus.c
3766@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3767 return mask >= DMA_BIT_MASK(32);
3768 }
3769
3770-static struct dma_map_ops ps3_sb_dma_ops = {
3771+static const struct dma_map_ops ps3_sb_dma_ops = {
3772 .alloc_coherent = ps3_alloc_coherent,
3773 .free_coherent = ps3_free_coherent,
3774 .map_sg = ps3_sb_map_sg,
3775@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3776 .unmap_page = ps3_unmap_page,
3777 };
3778
3779-static struct dma_map_ops ps3_ioc0_dma_ops = {
3780+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3781 .alloc_coherent = ps3_alloc_coherent,
3782 .free_coherent = ps3_free_coherent,
3783 .map_sg = ps3_ioc0_map_sg,
3784diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3785index f0e6f28..60d53ed 100644
3786--- a/arch/powerpc/platforms/pseries/Kconfig
3787+++ b/arch/powerpc/platforms/pseries/Kconfig
3788@@ -2,6 +2,8 @@ config PPC_PSERIES
3789 depends on PPC64 && PPC_BOOK3S
3790 bool "IBM pSeries & new (POWER5-based) iSeries"
3791 select MPIC
3792+ select PCI_MSI
3793+ select XICS
3794 select PPC_I8259
3795 select PPC_RTAS
3796 select RTAS_ERROR_LOGGING
3797diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3798index 43c0aca..42c045b 100644
3799--- a/arch/s390/Kconfig
3800+++ b/arch/s390/Kconfig
3801@@ -194,28 +194,26 @@ config AUDIT_ARCH
3802
3803 config S390_SWITCH_AMODE
3804 bool "Switch kernel/user addressing modes"
3805+ default y
3806 help
3807 This option allows to switch the addressing modes of kernel and user
3808- space. The kernel parameter switch_amode=on will enable this feature,
3809- default is disabled. Enabling this (via kernel parameter) on machines
3810- earlier than IBM System z9-109 EC/BC will reduce system performance.
3811+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3812+ will reduce system performance.
3813
3814 Note that this option will also be selected by selecting the execute
3815- protection option below. Enabling the execute protection via the
3816- noexec kernel parameter will also switch the addressing modes,
3817- independent of the switch_amode kernel parameter.
3818+ protection option below. Enabling the execute protection will also
3819+ switch the addressing modes, independent of this option.
3820
3821
3822 config S390_EXEC_PROTECT
3823 bool "Data execute protection"
3824+ default y
3825 select S390_SWITCH_AMODE
3826 help
3827 This option allows to enable a buffer overflow protection for user
3828 space programs and it also selects the addressing mode option above.
3829- The kernel parameter noexec=on will enable this feature and also
3830- switch the addressing modes, default is disabled. Enabling this (via
3831- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3832- will reduce system performance.
3833+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3834+ reduce system performance.
3835
3836 comment "Code generation options"
3837
3838diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3839index e885442..5e6c303 100644
3840--- a/arch/s390/include/asm/elf.h
3841+++ b/arch/s390/include/asm/elf.h
3842@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3843 that it will "exec", and that there is sufficient room for the brk. */
3844 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3845
3846+#ifdef CONFIG_PAX_ASLR
3847+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3848+
3849+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3850+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3851+#endif
3852+
3853 /* This yields a mask that user programs can use to figure out what
3854 instruction set this CPU supports. */
3855
3856diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3857index e37478e..9ce0e9f 100644
3858--- a/arch/s390/include/asm/setup.h
3859+++ b/arch/s390/include/asm/setup.h
3860@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3861 void detect_memory_layout(struct mem_chunk chunk[]);
3862
3863 #ifdef CONFIG_S390_SWITCH_AMODE
3864-extern unsigned int switch_amode;
3865+#define switch_amode (1)
3866 #else
3867 #define switch_amode (0)
3868 #endif
3869
3870 #ifdef CONFIG_S390_EXEC_PROTECT
3871-extern unsigned int s390_noexec;
3872+#define s390_noexec (1)
3873 #else
3874 #define s390_noexec (0)
3875 #endif
3876diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3877index 8377e91..e28e6f1 100644
3878--- a/arch/s390/include/asm/uaccess.h
3879+++ b/arch/s390/include/asm/uaccess.h
3880@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3881 copy_to_user(void __user *to, const void *from, unsigned long n)
3882 {
3883 might_fault();
3884+
3885+ if ((long)n < 0)
3886+ return n;
3887+
3888 if (access_ok(VERIFY_WRITE, to, n))
3889 n = __copy_to_user(to, from, n);
3890 return n;
3891@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3892 static inline unsigned long __must_check
3893 __copy_from_user(void *to, const void __user *from, unsigned long n)
3894 {
3895+ if ((long)n < 0)
3896+ return n;
3897+
3898 if (__builtin_constant_p(n) && (n <= 256))
3899 return uaccess.copy_from_user_small(n, from, to);
3900 else
3901@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3902 copy_from_user(void *to, const void __user *from, unsigned long n)
3903 {
3904 might_fault();
3905+
3906+ if ((long)n < 0)
3907+ return n;
3908+
3909 if (access_ok(VERIFY_READ, from, n))
3910 n = __copy_from_user(to, from, n);
3911 else
3912diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3913index 639380a..72e3c02 100644
3914--- a/arch/s390/kernel/module.c
3915+++ b/arch/s390/kernel/module.c
3916@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3917
3918 /* Increase core size by size of got & plt and set start
3919 offsets for got and plt. */
3920- me->core_size = ALIGN(me->core_size, 4);
3921- me->arch.got_offset = me->core_size;
3922- me->core_size += me->arch.got_size;
3923- me->arch.plt_offset = me->core_size;
3924- me->core_size += me->arch.plt_size;
3925+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3926+ me->arch.got_offset = me->core_size_rw;
3927+ me->core_size_rw += me->arch.got_size;
3928+ me->arch.plt_offset = me->core_size_rx;
3929+ me->core_size_rx += me->arch.plt_size;
3930 return 0;
3931 }
3932
3933@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3934 if (info->got_initialized == 0) {
3935 Elf_Addr *gotent;
3936
3937- gotent = me->module_core + me->arch.got_offset +
3938+ gotent = me->module_core_rw + me->arch.got_offset +
3939 info->got_offset;
3940 *gotent = val;
3941 info->got_initialized = 1;
3942@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3943 else if (r_type == R_390_GOTENT ||
3944 r_type == R_390_GOTPLTENT)
3945 *(unsigned int *) loc =
3946- (val + (Elf_Addr) me->module_core - loc) >> 1;
3947+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3948 else if (r_type == R_390_GOT64 ||
3949 r_type == R_390_GOTPLT64)
3950 *(unsigned long *) loc = val;
3951@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3952 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3953 if (info->plt_initialized == 0) {
3954 unsigned int *ip;
3955- ip = me->module_core + me->arch.plt_offset +
3956+ ip = me->module_core_rx + me->arch.plt_offset +
3957 info->plt_offset;
3958 #ifndef CONFIG_64BIT
3959 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3960@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3961 val - loc + 0xffffUL < 0x1ffffeUL) ||
3962 (r_type == R_390_PLT32DBL &&
3963 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3964- val = (Elf_Addr) me->module_core +
3965+ val = (Elf_Addr) me->module_core_rx +
3966 me->arch.plt_offset +
3967 info->plt_offset;
3968 val += rela->r_addend - loc;
3969@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3970 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3971 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3972 val = val + rela->r_addend -
3973- ((Elf_Addr) me->module_core + me->arch.got_offset);
3974+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3975 if (r_type == R_390_GOTOFF16)
3976 *(unsigned short *) loc = val;
3977 else if (r_type == R_390_GOTOFF32)
3978@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3979 break;
3980 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3981 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3982- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3983+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3984 rela->r_addend - loc;
3985 if (r_type == R_390_GOTPC)
3986 *(unsigned int *) loc = val;
3987diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3988index 061479f..dbfb08c 100644
3989--- a/arch/s390/kernel/setup.c
3990+++ b/arch/s390/kernel/setup.c
3991@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3992 early_param("mem", early_parse_mem);
3993
3994 #ifdef CONFIG_S390_SWITCH_AMODE
3995-unsigned int switch_amode = 0;
3996-EXPORT_SYMBOL_GPL(switch_amode);
3997-
3998 static int set_amode_and_uaccess(unsigned long user_amode,
3999 unsigned long user32_amode)
4000 {
4001@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4002 return 0;
4003 }
4004 }
4005-
4006-/*
4007- * Switch kernel/user addressing modes?
4008- */
4009-static int __init early_parse_switch_amode(char *p)
4010-{
4011- switch_amode = 1;
4012- return 0;
4013-}
4014-early_param("switch_amode", early_parse_switch_amode);
4015-
4016 #else /* CONFIG_S390_SWITCH_AMODE */
4017 static inline int set_amode_and_uaccess(unsigned long user_amode,
4018 unsigned long user32_amode)
4019@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4020 }
4021 #endif /* CONFIG_S390_SWITCH_AMODE */
4022
4023-#ifdef CONFIG_S390_EXEC_PROTECT
4024-unsigned int s390_noexec = 0;
4025-EXPORT_SYMBOL_GPL(s390_noexec);
4026-
4027-/*
4028- * Enable execute protection?
4029- */
4030-static int __init early_parse_noexec(char *p)
4031-{
4032- if (!strncmp(p, "off", 3))
4033- return 0;
4034- switch_amode = 1;
4035- s390_noexec = 1;
4036- return 0;
4037-}
4038-early_param("noexec", early_parse_noexec);
4039-#endif /* CONFIG_S390_EXEC_PROTECT */
4040-
4041 static void setup_addressing_mode(void)
4042 {
4043 if (s390_noexec) {
4044diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4045index f4558cc..e461f37 100644
4046--- a/arch/s390/mm/mmap.c
4047+++ b/arch/s390/mm/mmap.c
4048@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4049 */
4050 if (mmap_is_legacy()) {
4051 mm->mmap_base = TASK_UNMAPPED_BASE;
4052+
4053+#ifdef CONFIG_PAX_RANDMMAP
4054+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4055+ mm->mmap_base += mm->delta_mmap;
4056+#endif
4057+
4058 mm->get_unmapped_area = arch_get_unmapped_area;
4059 mm->unmap_area = arch_unmap_area;
4060 } else {
4061 mm->mmap_base = mmap_base();
4062+
4063+#ifdef CONFIG_PAX_RANDMMAP
4064+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4065+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4066+#endif
4067+
4068 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4069 mm->unmap_area = arch_unmap_area_topdown;
4070 }
4071@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4072 */
4073 if (mmap_is_legacy()) {
4074 mm->mmap_base = TASK_UNMAPPED_BASE;
4075+
4076+#ifdef CONFIG_PAX_RANDMMAP
4077+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4078+ mm->mmap_base += mm->delta_mmap;
4079+#endif
4080+
4081 mm->get_unmapped_area = s390_get_unmapped_area;
4082 mm->unmap_area = arch_unmap_area;
4083 } else {
4084 mm->mmap_base = mmap_base();
4085+
4086+#ifdef CONFIG_PAX_RANDMMAP
4087+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4088+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4089+#endif
4090+
4091 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4092 mm->unmap_area = arch_unmap_area_topdown;
4093 }
4094diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4095index 589d5c7..669e274 100644
4096--- a/arch/score/include/asm/system.h
4097+++ b/arch/score/include/asm/system.h
4098@@ -17,7 +17,7 @@ do { \
4099 #define finish_arch_switch(prev) do {} while (0)
4100
4101 typedef void (*vi_handler_t)(void);
4102-extern unsigned long arch_align_stack(unsigned long sp);
4103+#define arch_align_stack(x) (x)
4104
4105 #define mb() barrier()
4106 #define rmb() barrier()
4107diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4108index 25d0803..d6c8e36 100644
4109--- a/arch/score/kernel/process.c
4110+++ b/arch/score/kernel/process.c
4111@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4112
4113 return task_pt_regs(task)->cp0_epc;
4114 }
4115-
4116-unsigned long arch_align_stack(unsigned long sp)
4117-{
4118- return sp;
4119-}
4120diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4121index d936c1a..304a252 100644
4122--- a/arch/sh/boards/mach-hp6xx/pm.c
4123+++ b/arch/sh/boards/mach-hp6xx/pm.c
4124@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4125 return 0;
4126 }
4127
4128-static struct platform_suspend_ops hp6x0_pm_ops = {
4129+static const struct platform_suspend_ops hp6x0_pm_ops = {
4130 .enter = hp6x0_pm_enter,
4131 .valid = suspend_valid_only_mem,
4132 };
4133diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4134index 8a8a993..7b3079b 100644
4135--- a/arch/sh/kernel/cpu/sh4/sq.c
4136+++ b/arch/sh/kernel/cpu/sh4/sq.c
4137@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4138 NULL,
4139 };
4140
4141-static struct sysfs_ops sq_sysfs_ops = {
4142+static const struct sysfs_ops sq_sysfs_ops = {
4143 .show = sq_sysfs_show,
4144 .store = sq_sysfs_store,
4145 };
4146diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4147index ee3c2aa..c49cee6 100644
4148--- a/arch/sh/kernel/cpu/shmobile/pm.c
4149+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4150@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4151 return 0;
4152 }
4153
4154-static struct platform_suspend_ops sh_pm_ops = {
4155+static const struct platform_suspend_ops sh_pm_ops = {
4156 .enter = sh_pm_enter,
4157 .valid = suspend_valid_only_mem,
4158 };
4159diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4160index 3e532d0..9faa306 100644
4161--- a/arch/sh/kernel/kgdb.c
4162+++ b/arch/sh/kernel/kgdb.c
4163@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4164 {
4165 }
4166
4167-struct kgdb_arch arch_kgdb_ops = {
4168+const struct kgdb_arch arch_kgdb_ops = {
4169 /* Breakpoint instruction: trapa #0x3c */
4170 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4171 .gdb_bpt_instr = { 0x3c, 0xc3 },
4172diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4173index afeb710..d1d1289 100644
4174--- a/arch/sh/mm/mmap.c
4175+++ b/arch/sh/mm/mmap.c
4176@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4177 addr = PAGE_ALIGN(addr);
4178
4179 vma = find_vma(mm, addr);
4180- if (TASK_SIZE - len >= addr &&
4181- (!vma || addr + len <= vma->vm_start))
4182+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4183 return addr;
4184 }
4185
4186@@ -106,7 +105,7 @@ full_search:
4187 }
4188 return -ENOMEM;
4189 }
4190- if (likely(!vma || addr + len <= vma->vm_start)) {
4191+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4192 /*
4193 * Remember the place where we stopped the search:
4194 */
4195@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4196 addr = PAGE_ALIGN(addr);
4197
4198 vma = find_vma(mm, addr);
4199- if (TASK_SIZE - len >= addr &&
4200- (!vma || addr + len <= vma->vm_start))
4201+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4202 return addr;
4203 }
4204
4205@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4206 /* make sure it can fit in the remaining address space */
4207 if (likely(addr > len)) {
4208 vma = find_vma(mm, addr-len);
4209- if (!vma || addr <= vma->vm_start) {
4210+ if (check_heap_stack_gap(vma, addr - len, len)) {
4211 /* remember the address as a hint for next time */
4212 return (mm->free_area_cache = addr-len);
4213 }
4214@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4215 if (unlikely(mm->mmap_base < len))
4216 goto bottomup;
4217
4218- addr = mm->mmap_base-len;
4219- if (do_colour_align)
4220- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4221+ addr = mm->mmap_base - len;
4222
4223 do {
4224+ if (do_colour_align)
4225+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4226 /*
4227 * Lookup failure means no vma is above this address,
4228 * else if new region fits below vma->vm_start,
4229 * return with success:
4230 */
4231 vma = find_vma(mm, addr);
4232- if (likely(!vma || addr+len <= vma->vm_start)) {
4233+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4234 /* remember the address as a hint for next time */
4235 return (mm->free_area_cache = addr);
4236 }
4237@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4238 mm->cached_hole_size = vma->vm_start - addr;
4239
4240 /* try just below the current vma->vm_start */
4241- addr = vma->vm_start-len;
4242- if (do_colour_align)
4243- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4244- } while (likely(len < vma->vm_start));
4245+ addr = skip_heap_stack_gap(vma, len);
4246+ } while (!IS_ERR_VALUE(addr));
4247
4248 bottomup:
4249 /*
4250diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4251index 113225b..7fd04e7 100644
4252--- a/arch/sparc/Makefile
4253+++ b/arch/sparc/Makefile
4254@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4255 # Export what is needed by arch/sparc/boot/Makefile
4256 export VMLINUX_INIT VMLINUX_MAIN
4257 VMLINUX_INIT := $(head-y) $(init-y)
4258-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4259+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4260 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4261 VMLINUX_MAIN += $(drivers-y) $(net-y)
4262
4263diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4264index f5cc06f..f858d47 100644
4265--- a/arch/sparc/include/asm/atomic_64.h
4266+++ b/arch/sparc/include/asm/atomic_64.h
4267@@ -14,18 +14,40 @@
4268 #define ATOMIC64_INIT(i) { (i) }
4269
4270 #define atomic_read(v) ((v)->counter)
4271+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4272+{
4273+ return v->counter;
4274+}
4275 #define atomic64_read(v) ((v)->counter)
4276+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4277+{
4278+ return v->counter;
4279+}
4280
4281 #define atomic_set(v, i) (((v)->counter) = i)
4282+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4283+{
4284+ v->counter = i;
4285+}
4286 #define atomic64_set(v, i) (((v)->counter) = i)
4287+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4288+{
4289+ v->counter = i;
4290+}
4291
4292 extern void atomic_add(int, atomic_t *);
4293+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4294 extern void atomic64_add(long, atomic64_t *);
4295+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4296 extern void atomic_sub(int, atomic_t *);
4297+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_sub(long, atomic64_t *);
4299+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4300
4301 extern int atomic_add_ret(int, atomic_t *);
4302+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4303 extern long atomic64_add_ret(long, atomic64_t *);
4304+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4305 extern int atomic_sub_ret(int, atomic_t *);
4306 extern long atomic64_sub_ret(long, atomic64_t *);
4307
4308@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4309 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4310
4311 #define atomic_inc_return(v) atomic_add_ret(1, v)
4312+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4313+{
4314+ return atomic_add_ret_unchecked(1, v);
4315+}
4316 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4317+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4318+{
4319+ return atomic64_add_ret_unchecked(1, v);
4320+}
4321
4322 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4323 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4324
4325 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4326+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4327+{
4328+ return atomic_add_ret_unchecked(i, v);
4329+}
4330 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4331+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4332+{
4333+ return atomic64_add_ret_unchecked(i, v);
4334+}
4335
4336 /*
4337 * atomic_inc_and_test - increment and test
4338@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4339 * other cases.
4340 */
4341 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4342+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4343+{
4344+ return atomic_inc_return_unchecked(v) == 0;
4345+}
4346 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4347
4348 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4349@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4350 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4351
4352 #define atomic_inc(v) atomic_add(1, v)
4353+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4354+{
4355+ atomic_add_unchecked(1, v);
4356+}
4357 #define atomic64_inc(v) atomic64_add(1, v)
4358+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4359+{
4360+ atomic64_add_unchecked(1, v);
4361+}
4362
4363 #define atomic_dec(v) atomic_sub(1, v)
4364+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4365+{
4366+ atomic_sub_unchecked(1, v);
4367+}
4368 #define atomic64_dec(v) atomic64_sub(1, v)
4369+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4370+{
4371+ atomic64_sub_unchecked(1, v);
4372+}
4373
4374 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4375 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4376
4377 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4378+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4379+{
4380+ return cmpxchg(&v->counter, old, new);
4381+}
4382 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4383+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4384+{
4385+ return xchg(&v->counter, new);
4386+}
4387
4388 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4389 {
4390- int c, old;
4391+ int c, old, new;
4392 c = atomic_read(v);
4393 for (;;) {
4394- if (unlikely(c == (u)))
4395+ if (unlikely(c == u))
4396 break;
4397- old = atomic_cmpxchg((v), c, c + (a));
4398+
4399+ asm volatile("addcc %2, %0, %0\n"
4400+
4401+#ifdef CONFIG_PAX_REFCOUNT
4402+ "tvs %%icc, 6\n"
4403+#endif
4404+
4405+ : "=r" (new)
4406+ : "0" (c), "ir" (a)
4407+ : "cc");
4408+
4409+ old = atomic_cmpxchg(v, c, new);
4410 if (likely(old == c))
4411 break;
4412 c = old;
4413 }
4414- return c != (u);
4415+ return c != u;
4416 }
4417
4418 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4419@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4420 #define atomic64_cmpxchg(v, o, n) \
4421 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4422 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4423+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4424+{
4425+ return xchg(&v->counter, new);
4426+}
4427
4428 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4429 {
4430- long c, old;
4431+ long c, old, new;
4432 c = atomic64_read(v);
4433 for (;;) {
4434- if (unlikely(c == (u)))
4435+ if (unlikely(c == u))
4436 break;
4437- old = atomic64_cmpxchg((v), c, c + (a));
4438+
4439+ asm volatile("addcc %2, %0, %0\n"
4440+
4441+#ifdef CONFIG_PAX_REFCOUNT
4442+ "tvs %%xcc, 6\n"
4443+#endif
4444+
4445+ : "=r" (new)
4446+ : "0" (c), "ir" (a)
4447+ : "cc");
4448+
4449+ old = atomic64_cmpxchg(v, c, new);
4450 if (likely(old == c))
4451 break;
4452 c = old;
4453 }
4454- return c != (u);
4455+ return c != u;
4456 }
4457
4458 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4459diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4460index 41f85ae..fb54d5e 100644
4461--- a/arch/sparc/include/asm/cache.h
4462+++ b/arch/sparc/include/asm/cache.h
4463@@ -8,7 +8,7 @@
4464 #define _SPARC_CACHE_H
4465
4466 #define L1_CACHE_SHIFT 5
4467-#define L1_CACHE_BYTES 32
4468+#define L1_CACHE_BYTES 32UL
4469 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4470
4471 #ifdef CONFIG_SPARC32
4472diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4473index 5a8c308..38def92 100644
4474--- a/arch/sparc/include/asm/dma-mapping.h
4475+++ b/arch/sparc/include/asm/dma-mapping.h
4476@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4477 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4478 #define dma_is_consistent(d, h) (1)
4479
4480-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4481+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4482 extern struct bus_type pci_bus_type;
4483
4484-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4485+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4486 {
4487 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4488 if (dev->bus == &pci_bus_type)
4489@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4490 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4491 dma_addr_t *dma_handle, gfp_t flag)
4492 {
4493- struct dma_map_ops *ops = get_dma_ops(dev);
4494+ const struct dma_map_ops *ops = get_dma_ops(dev);
4495 void *cpu_addr;
4496
4497 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4498@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4499 static inline void dma_free_coherent(struct device *dev, size_t size,
4500 void *cpu_addr, dma_addr_t dma_handle)
4501 {
4502- struct dma_map_ops *ops = get_dma_ops(dev);
4503+ const struct dma_map_ops *ops = get_dma_ops(dev);
4504
4505 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4506 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4507diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4508index 381a1b5..b97e3ff 100644
4509--- a/arch/sparc/include/asm/elf_32.h
4510+++ b/arch/sparc/include/asm/elf_32.h
4511@@ -116,6 +116,13 @@ typedef struct {
4512
4513 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4514
4515+#ifdef CONFIG_PAX_ASLR
4516+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4517+
4518+#define PAX_DELTA_MMAP_LEN 16
4519+#define PAX_DELTA_STACK_LEN 16
4520+#endif
4521+
4522 /* This yields a mask that user programs can use to figure out what
4523 instruction set this cpu supports. This can NOT be done in userspace
4524 on Sparc. */
4525diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4526index 9968085..c2106ef 100644
4527--- a/arch/sparc/include/asm/elf_64.h
4528+++ b/arch/sparc/include/asm/elf_64.h
4529@@ -163,6 +163,12 @@ typedef struct {
4530 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4531 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4532
4533+#ifdef CONFIG_PAX_ASLR
4534+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4535+
4536+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4537+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4538+#endif
4539
4540 /* This yields a mask that user programs can use to figure out what
4541 instruction set this cpu supports. */
4542diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4543index e0cabe7..efd60f1 100644
4544--- a/arch/sparc/include/asm/pgtable_32.h
4545+++ b/arch/sparc/include/asm/pgtable_32.h
4546@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4547 BTFIXUPDEF_INT(page_none)
4548 BTFIXUPDEF_INT(page_copy)
4549 BTFIXUPDEF_INT(page_readonly)
4550+
4551+#ifdef CONFIG_PAX_PAGEEXEC
4552+BTFIXUPDEF_INT(page_shared_noexec)
4553+BTFIXUPDEF_INT(page_copy_noexec)
4554+BTFIXUPDEF_INT(page_readonly_noexec)
4555+#endif
4556+
4557 BTFIXUPDEF_INT(page_kernel)
4558
4559 #define PMD_SHIFT SUN4C_PMD_SHIFT
4560@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4561 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4562 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4563
4564+#ifdef CONFIG_PAX_PAGEEXEC
4565+extern pgprot_t PAGE_SHARED_NOEXEC;
4566+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4567+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4568+#else
4569+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4570+# define PAGE_COPY_NOEXEC PAGE_COPY
4571+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4572+#endif
4573+
4574 extern unsigned long page_kernel;
4575
4576 #ifdef MODULE
4577diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4578index 1407c07..7e10231 100644
4579--- a/arch/sparc/include/asm/pgtsrmmu.h
4580+++ b/arch/sparc/include/asm/pgtsrmmu.h
4581@@ -115,6 +115,13 @@
4582 SRMMU_EXEC | SRMMU_REF)
4583 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4584 SRMMU_EXEC | SRMMU_REF)
4585+
4586+#ifdef CONFIG_PAX_PAGEEXEC
4587+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4588+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4589+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4590+#endif
4591+
4592 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4593 SRMMU_DIRTY | SRMMU_REF)
4594
4595diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4596index 43e5147..47622a1 100644
4597--- a/arch/sparc/include/asm/spinlock_64.h
4598+++ b/arch/sparc/include/asm/spinlock_64.h
4599@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4600
4601 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4602
4603-static void inline arch_read_lock(raw_rwlock_t *lock)
4604+static inline void arch_read_lock(raw_rwlock_t *lock)
4605 {
4606 unsigned long tmp1, tmp2;
4607
4608 __asm__ __volatile__ (
4609 "1: ldsw [%2], %0\n"
4610 " brlz,pn %0, 2f\n"
4611-"4: add %0, 1, %1\n"
4612+"4: addcc %0, 1, %1\n"
4613+
4614+#ifdef CONFIG_PAX_REFCOUNT
4615+" tvs %%icc, 6\n"
4616+#endif
4617+
4618 " cas [%2], %0, %1\n"
4619 " cmp %0, %1\n"
4620 " bne,pn %%icc, 1b\n"
4621@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4622 " .previous"
4623 : "=&r" (tmp1), "=&r" (tmp2)
4624 : "r" (lock)
4625- : "memory");
4626+ : "memory", "cc");
4627 }
4628
4629-static int inline arch_read_trylock(raw_rwlock_t *lock)
4630+static inline int arch_read_trylock(raw_rwlock_t *lock)
4631 {
4632 int tmp1, tmp2;
4633
4634@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4635 "1: ldsw [%2], %0\n"
4636 " brlz,a,pn %0, 2f\n"
4637 " mov 0, %0\n"
4638-" add %0, 1, %1\n"
4639+" addcc %0, 1, %1\n"
4640+
4641+#ifdef CONFIG_PAX_REFCOUNT
4642+" tvs %%icc, 6\n"
4643+#endif
4644+
4645 " cas [%2], %0, %1\n"
4646 " cmp %0, %1\n"
4647 " bne,pn %%icc, 1b\n"
4648@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4649 return tmp1;
4650 }
4651
4652-static void inline arch_read_unlock(raw_rwlock_t *lock)
4653+static inline void arch_read_unlock(raw_rwlock_t *lock)
4654 {
4655 unsigned long tmp1, tmp2;
4656
4657 __asm__ __volatile__(
4658 "1: lduw [%2], %0\n"
4659-" sub %0, 1, %1\n"
4660+" subcc %0, 1, %1\n"
4661+
4662+#ifdef CONFIG_PAX_REFCOUNT
4663+" tvs %%icc, 6\n"
4664+#endif
4665+
4666 " cas [%2], %0, %1\n"
4667 " cmp %0, %1\n"
4668 " bne,pn %%xcc, 1b\n"
4669@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4670 : "memory");
4671 }
4672
4673-static void inline arch_write_lock(raw_rwlock_t *lock)
4674+static inline void arch_write_lock(raw_rwlock_t *lock)
4675 {
4676 unsigned long mask, tmp1, tmp2;
4677
4678@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4679 : "memory");
4680 }
4681
4682-static void inline arch_write_unlock(raw_rwlock_t *lock)
4683+static inline void arch_write_unlock(raw_rwlock_t *lock)
4684 {
4685 __asm__ __volatile__(
4686 " stw %%g0, [%0]"
4687@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4688 : "memory");
4689 }
4690
4691-static int inline arch_write_trylock(raw_rwlock_t *lock)
4692+static inline int arch_write_trylock(raw_rwlock_t *lock)
4693 {
4694 unsigned long mask, tmp1, tmp2, result;
4695
4696diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4697index 844d73a..f787fb9 100644
4698--- a/arch/sparc/include/asm/thread_info_32.h
4699+++ b/arch/sparc/include/asm/thread_info_32.h
4700@@ -50,6 +50,8 @@ struct thread_info {
4701 unsigned long w_saved;
4702
4703 struct restart_block restart_block;
4704+
4705+ unsigned long lowest_stack;
4706 };
4707
4708 /*
4709diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4710index f78ad9a..9f55fc7 100644
4711--- a/arch/sparc/include/asm/thread_info_64.h
4712+++ b/arch/sparc/include/asm/thread_info_64.h
4713@@ -68,6 +68,8 @@ struct thread_info {
4714 struct pt_regs *kern_una_regs;
4715 unsigned int kern_una_insn;
4716
4717+ unsigned long lowest_stack;
4718+
4719 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4720 };
4721
4722diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4723index e88fbe5..96b0ce5 100644
4724--- a/arch/sparc/include/asm/uaccess.h
4725+++ b/arch/sparc/include/asm/uaccess.h
4726@@ -1,5 +1,13 @@
4727 #ifndef ___ASM_SPARC_UACCESS_H
4728 #define ___ASM_SPARC_UACCESS_H
4729+
4730+#ifdef __KERNEL__
4731+#ifndef __ASSEMBLY__
4732+#include <linux/types.h>
4733+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4734+#endif
4735+#endif
4736+
4737 #if defined(__sparc__) && defined(__arch64__)
4738 #include <asm/uaccess_64.h>
4739 #else
4740diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4741index 8303ac4..07f333d 100644
4742--- a/arch/sparc/include/asm/uaccess_32.h
4743+++ b/arch/sparc/include/asm/uaccess_32.h
4744@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4745
4746 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4747 {
4748- if (n && __access_ok((unsigned long) to, n))
4749+ if ((long)n < 0)
4750+ return n;
4751+
4752+ if (n && __access_ok((unsigned long) to, n)) {
4753+ if (!__builtin_constant_p(n))
4754+ check_object_size(from, n, true);
4755 return __copy_user(to, (__force void __user *) from, n);
4756- else
4757+ } else
4758 return n;
4759 }
4760
4761 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4762 {
4763+ if ((long)n < 0)
4764+ return n;
4765+
4766+ if (!__builtin_constant_p(n))
4767+ check_object_size(from, n, true);
4768+
4769 return __copy_user(to, (__force void __user *) from, n);
4770 }
4771
4772 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4773 {
4774- if (n && __access_ok((unsigned long) from, n))
4775+ if ((long)n < 0)
4776+ return n;
4777+
4778+ if (n && __access_ok((unsigned long) from, n)) {
4779+ if (!__builtin_constant_p(n))
4780+ check_object_size(to, n, false);
4781 return __copy_user((__force void __user *) to, from, n);
4782- else
4783+ } else
4784 return n;
4785 }
4786
4787 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4788 {
4789+ if ((long)n < 0)
4790+ return n;
4791+
4792 return __copy_user((__force void __user *) to, from, n);
4793 }
4794
4795diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4796index 9ea271e..7b8a271 100644
4797--- a/arch/sparc/include/asm/uaccess_64.h
4798+++ b/arch/sparc/include/asm/uaccess_64.h
4799@@ -9,6 +9,7 @@
4800 #include <linux/compiler.h>
4801 #include <linux/string.h>
4802 #include <linux/thread_info.h>
4803+#include <linux/kernel.h>
4804 #include <asm/asi.h>
4805 #include <asm/system.h>
4806 #include <asm/spitfire.h>
4807@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4808 static inline unsigned long __must_check
4809 copy_from_user(void *to, const void __user *from, unsigned long size)
4810 {
4811- unsigned long ret = ___copy_from_user(to, from, size);
4812+ unsigned long ret;
4813
4814+ if ((long)size < 0 || size > INT_MAX)
4815+ return size;
4816+
4817+ if (!__builtin_constant_p(size))
4818+ check_object_size(to, size, false);
4819+
4820+ ret = ___copy_from_user(to, from, size);
4821 if (unlikely(ret))
4822 ret = copy_from_user_fixup(to, from, size);
4823 return ret;
4824@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4825 static inline unsigned long __must_check
4826 copy_to_user(void __user *to, const void *from, unsigned long size)
4827 {
4828- unsigned long ret = ___copy_to_user(to, from, size);
4829+ unsigned long ret;
4830+
4831+ if ((long)size < 0 || size > INT_MAX)
4832+ return size;
4833+
4834+ if (!__builtin_constant_p(size))
4835+ check_object_size(from, size, true);
4836
4837+ ret = ___copy_to_user(to, from, size);
4838 if (unlikely(ret))
4839 ret = copy_to_user_fixup(to, from, size);
4840 return ret;
4841diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4842index 2782681..77ded84 100644
4843--- a/arch/sparc/kernel/Makefile
4844+++ b/arch/sparc/kernel/Makefile
4845@@ -3,7 +3,7 @@
4846 #
4847
4848 asflags-y := -ansi
4849-ccflags-y := -Werror
4850+#ccflags-y := -Werror
4851
4852 extra-y := head_$(BITS).o
4853 extra-y += init_task.o
4854diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4855index 7690cc2..ece64c9 100644
4856--- a/arch/sparc/kernel/iommu.c
4857+++ b/arch/sparc/kernel/iommu.c
4858@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4859 spin_unlock_irqrestore(&iommu->lock, flags);
4860 }
4861
4862-static struct dma_map_ops sun4u_dma_ops = {
4863+static const struct dma_map_ops sun4u_dma_ops = {
4864 .alloc_coherent = dma_4u_alloc_coherent,
4865 .free_coherent = dma_4u_free_coherent,
4866 .map_page = dma_4u_map_page,
4867@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4868 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4869 };
4870
4871-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4872+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4873 EXPORT_SYMBOL(dma_ops);
4874
4875 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4876diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4877index 9f61fd8..bd048db 100644
4878--- a/arch/sparc/kernel/ioport.c
4879+++ b/arch/sparc/kernel/ioport.c
4880@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4881 BUG();
4882 }
4883
4884-struct dma_map_ops sbus_dma_ops = {
4885+const struct dma_map_ops sbus_dma_ops = {
4886 .alloc_coherent = sbus_alloc_coherent,
4887 .free_coherent = sbus_free_coherent,
4888 .map_page = sbus_map_page,
4889@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4890 .sync_sg_for_device = sbus_sync_sg_for_device,
4891 };
4892
4893-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4894+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4895 EXPORT_SYMBOL(dma_ops);
4896
4897 static int __init sparc_register_ioport(void)
4898@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4899 }
4900 }
4901
4902-struct dma_map_ops pci32_dma_ops = {
4903+const struct dma_map_ops pci32_dma_ops = {
4904 .alloc_coherent = pci32_alloc_coherent,
4905 .free_coherent = pci32_free_coherent,
4906 .map_page = pci32_map_page,
4907diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4908index 04df4ed..55c4b6e 100644
4909--- a/arch/sparc/kernel/kgdb_32.c
4910+++ b/arch/sparc/kernel/kgdb_32.c
4911@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4912 {
4913 }
4914
4915-struct kgdb_arch arch_kgdb_ops = {
4916+const struct kgdb_arch arch_kgdb_ops = {
4917 /* Breakpoint instruction: ta 0x7d */
4918 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4919 };
4920diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4921index f5a0fd4..d886f71 100644
4922--- a/arch/sparc/kernel/kgdb_64.c
4923+++ b/arch/sparc/kernel/kgdb_64.c
4924@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4925 {
4926 }
4927
4928-struct kgdb_arch arch_kgdb_ops = {
4929+const struct kgdb_arch arch_kgdb_ops = {
4930 /* Breakpoint instruction: ta 0x72 */
4931 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4932 };
4933diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4934index 23c33ff..d137fbd 100644
4935--- a/arch/sparc/kernel/pci_sun4v.c
4936+++ b/arch/sparc/kernel/pci_sun4v.c
4937@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4938 spin_unlock_irqrestore(&iommu->lock, flags);
4939 }
4940
4941-static struct dma_map_ops sun4v_dma_ops = {
4942+static const struct dma_map_ops sun4v_dma_ops = {
4943 .alloc_coherent = dma_4v_alloc_coherent,
4944 .free_coherent = dma_4v_free_coherent,
4945 .map_page = dma_4v_map_page,
4946diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4947index c49865b..b41a81b 100644
4948--- a/arch/sparc/kernel/process_32.c
4949+++ b/arch/sparc/kernel/process_32.c
4950@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4951 rw->ins[4], rw->ins[5],
4952 rw->ins[6],
4953 rw->ins[7]);
4954- printk("%pS\n", (void *) rw->ins[7]);
4955+ printk("%pA\n", (void *) rw->ins[7]);
4956 rw = (struct reg_window32 *) rw->ins[6];
4957 }
4958 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4959@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4960
4961 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4962 r->psr, r->pc, r->npc, r->y, print_tainted());
4963- printk("PC: <%pS>\n", (void *) r->pc);
4964+ printk("PC: <%pA>\n", (void *) r->pc);
4965 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4966 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4967 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4968 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4969 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4970 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4971- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4972+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4973
4974 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4975 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4976@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4977 rw = (struct reg_window32 *) fp;
4978 pc = rw->ins[7];
4979 printk("[%08lx : ", pc);
4980- printk("%pS ] ", (void *) pc);
4981+ printk("%pA ] ", (void *) pc);
4982 fp = rw->ins[6];
4983 } while (++count < 16);
4984 printk("\n");
4985diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4986index cb70476..3d0c191 100644
4987--- a/arch/sparc/kernel/process_64.c
4988+++ b/arch/sparc/kernel/process_64.c
4989@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4990 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4991 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4992 if (regs->tstate & TSTATE_PRIV)
4993- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4994+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4995 }
4996
4997 void show_regs(struct pt_regs *regs)
4998 {
4999 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5000 regs->tpc, regs->tnpc, regs->y, print_tainted());
5001- printk("TPC: <%pS>\n", (void *) regs->tpc);
5002+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5003 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5004 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5005 regs->u_regs[3]);
5006@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5007 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5008 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5009 regs->u_regs[15]);
5010- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5011+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5012 show_regwindow(regs);
5013 }
5014
5015@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5016 ((tp && tp->task) ? tp->task->pid : -1));
5017
5018 if (gp->tstate & TSTATE_PRIV) {
5019- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5020+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5021 (void *) gp->tpc,
5022 (void *) gp->o7,
5023 (void *) gp->i7,
5024diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5025index 6edc4e5..06a69b4 100644
5026--- a/arch/sparc/kernel/sigutil_64.c
5027+++ b/arch/sparc/kernel/sigutil_64.c
5028@@ -2,6 +2,7 @@
5029 #include <linux/types.h>
5030 #include <linux/thread_info.h>
5031 #include <linux/uaccess.h>
5032+#include <linux/errno.h>
5033
5034 #include <asm/sigcontext.h>
5035 #include <asm/fpumacro.h>
5036diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5037index 3a82e65..ce0a53a 100644
5038--- a/arch/sparc/kernel/sys_sparc_32.c
5039+++ b/arch/sparc/kernel/sys_sparc_32.c
5040@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5041 if (ARCH_SUN4C && len > 0x20000000)
5042 return -ENOMEM;
5043 if (!addr)
5044- addr = TASK_UNMAPPED_BASE;
5045+ addr = current->mm->mmap_base;
5046
5047 if (flags & MAP_SHARED)
5048 addr = COLOUR_ALIGN(addr);
5049@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5050 }
5051 if (TASK_SIZE - PAGE_SIZE - len < addr)
5052 return -ENOMEM;
5053- if (!vmm || addr + len <= vmm->vm_start)
5054+ if (check_heap_stack_gap(vmm, addr, len))
5055 return addr;
5056 addr = vmm->vm_end;
5057 if (flags & MAP_SHARED)
5058diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5059index cfa0e19..98972ac 100644
5060--- a/arch/sparc/kernel/sys_sparc_64.c
5061+++ b/arch/sparc/kernel/sys_sparc_64.c
5062@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5063 /* We do not accept a shared mapping if it would violate
5064 * cache aliasing constraints.
5065 */
5066- if ((flags & MAP_SHARED) &&
5067+ if ((filp || (flags & MAP_SHARED)) &&
5068 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5069 return -EINVAL;
5070 return addr;
5071@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5072 if (filp || (flags & MAP_SHARED))
5073 do_color_align = 1;
5074
5075+#ifdef CONFIG_PAX_RANDMMAP
5076+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5077+#endif
5078+
5079 if (addr) {
5080 if (do_color_align)
5081 addr = COLOUR_ALIGN(addr, pgoff);
5082@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5083 addr = PAGE_ALIGN(addr);
5084
5085 vma = find_vma(mm, addr);
5086- if (task_size - len >= addr &&
5087- (!vma || addr + len <= vma->vm_start))
5088+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5089 return addr;
5090 }
5091
5092 if (len > mm->cached_hole_size) {
5093- start_addr = addr = mm->free_area_cache;
5094+ start_addr = addr = mm->free_area_cache;
5095 } else {
5096- start_addr = addr = TASK_UNMAPPED_BASE;
5097+ start_addr = addr = mm->mmap_base;
5098 mm->cached_hole_size = 0;
5099 }
5100
5101@@ -175,14 +178,14 @@ full_search:
5102 vma = find_vma(mm, VA_EXCLUDE_END);
5103 }
5104 if (unlikely(task_size < addr)) {
5105- if (start_addr != TASK_UNMAPPED_BASE) {
5106- start_addr = addr = TASK_UNMAPPED_BASE;
5107+ if (start_addr != mm->mmap_base) {
5108+ start_addr = addr = mm->mmap_base;
5109 mm->cached_hole_size = 0;
5110 goto full_search;
5111 }
5112 return -ENOMEM;
5113 }
5114- if (likely(!vma || addr + len <= vma->vm_start)) {
5115+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5116 /*
5117 * Remember the place where we stopped the search:
5118 */
5119@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5120 /* We do not accept a shared mapping if it would violate
5121 * cache aliasing constraints.
5122 */
5123- if ((flags & MAP_SHARED) &&
5124+ if ((filp || (flags & MAP_SHARED)) &&
5125 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5126 return -EINVAL;
5127 return addr;
5128@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5129 addr = PAGE_ALIGN(addr);
5130
5131 vma = find_vma(mm, addr);
5132- if (task_size - len >= addr &&
5133- (!vma || addr + len <= vma->vm_start))
5134+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5135 return addr;
5136 }
5137
5138@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5139 /* make sure it can fit in the remaining address space */
5140 if (likely(addr > len)) {
5141 vma = find_vma(mm, addr-len);
5142- if (!vma || addr <= vma->vm_start) {
5143+ if (check_heap_stack_gap(vma, addr - len, len)) {
5144 /* remember the address as a hint for next time */
5145 return (mm->free_area_cache = addr-len);
5146 }
5147@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5148 if (unlikely(mm->mmap_base < len))
5149 goto bottomup;
5150
5151- addr = mm->mmap_base-len;
5152- if (do_color_align)
5153- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5154+ addr = mm->mmap_base - len;
5155
5156 do {
5157+ if (do_color_align)
5158+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5159 /*
5160 * Lookup failure means no vma is above this address,
5161 * else if new region fits below vma->vm_start,
5162 * return with success:
5163 */
5164 vma = find_vma(mm, addr);
5165- if (likely(!vma || addr+len <= vma->vm_start)) {
5166+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5167 /* remember the address as a hint for next time */
5168 return (mm->free_area_cache = addr);
5169 }
5170@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5171 mm->cached_hole_size = vma->vm_start - addr;
5172
5173 /* try just below the current vma->vm_start */
5174- addr = vma->vm_start-len;
5175- if (do_color_align)
5176- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5177- } while (likely(len < vma->vm_start));
5178+ addr = skip_heap_stack_gap(vma, len);
5179+ } while (!IS_ERR_VALUE(addr));
5180
5181 bottomup:
5182 /*
5183@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5184 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5185 sysctl_legacy_va_layout) {
5186 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5187+
5188+#ifdef CONFIG_PAX_RANDMMAP
5189+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5190+ mm->mmap_base += mm->delta_mmap;
5191+#endif
5192+
5193 mm->get_unmapped_area = arch_get_unmapped_area;
5194 mm->unmap_area = arch_unmap_area;
5195 } else {
5196@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5197 gap = (task_size / 6 * 5);
5198
5199 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5200+
5201+#ifdef CONFIG_PAX_RANDMMAP
5202+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5203+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5204+#endif
5205+
5206 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5207 mm->unmap_area = arch_unmap_area_topdown;
5208 }
5209diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5210index c0490c7..84959d1 100644
5211--- a/arch/sparc/kernel/traps_32.c
5212+++ b/arch/sparc/kernel/traps_32.c
5213@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5214 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5215 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5216
5217+extern void gr_handle_kernel_exploit(void);
5218+
5219 void die_if_kernel(char *str, struct pt_regs *regs)
5220 {
5221 static int die_counter;
5222@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5223 count++ < 30 &&
5224 (((unsigned long) rw) >= PAGE_OFFSET) &&
5225 !(((unsigned long) rw) & 0x7)) {
5226- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5227+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5228 (void *) rw->ins[7]);
5229 rw = (struct reg_window32 *)rw->ins[6];
5230 }
5231 }
5232 printk("Instruction DUMP:");
5233 instruction_dump ((unsigned long *) regs->pc);
5234- if(regs->psr & PSR_PS)
5235+ if(regs->psr & PSR_PS) {
5236+ gr_handle_kernel_exploit();
5237 do_exit(SIGKILL);
5238+ }
5239 do_exit(SIGSEGV);
5240 }
5241
5242diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5243index 10f7bb9..cdb6793 100644
5244--- a/arch/sparc/kernel/traps_64.c
5245+++ b/arch/sparc/kernel/traps_64.c
5246@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5247 i + 1,
5248 p->trapstack[i].tstate, p->trapstack[i].tpc,
5249 p->trapstack[i].tnpc, p->trapstack[i].tt);
5250- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5251+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5252 }
5253 }
5254
5255@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5256
5257 lvl -= 0x100;
5258 if (regs->tstate & TSTATE_PRIV) {
5259+
5260+#ifdef CONFIG_PAX_REFCOUNT
5261+ if (lvl == 6)
5262+ pax_report_refcount_overflow(regs);
5263+#endif
5264+
5265 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5266 die_if_kernel(buffer, regs);
5267 }
5268@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5269 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5270 {
5271 char buffer[32];
5272-
5273+
5274 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5275 0, lvl, SIGTRAP) == NOTIFY_STOP)
5276 return;
5277
5278+#ifdef CONFIG_PAX_REFCOUNT
5279+ if (lvl == 6)
5280+ pax_report_refcount_overflow(regs);
5281+#endif
5282+
5283 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5284
5285 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5286@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5287 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5288 printk("%s" "ERROR(%d): ",
5289 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5290- printk("TPC<%pS>\n", (void *) regs->tpc);
5291+ printk("TPC<%pA>\n", (void *) regs->tpc);
5292 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5294 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5295@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5296 smp_processor_id(),
5297 (type & 0x1) ? 'I' : 'D',
5298 regs->tpc);
5299- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5300+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5301 panic("Irrecoverable Cheetah+ parity error.");
5302 }
5303
5304@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5305 smp_processor_id(),
5306 (type & 0x1) ? 'I' : 'D',
5307 regs->tpc);
5308- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5309+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5310 }
5311
5312 struct sun4v_error_entry {
5313@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5314
5315 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5316 regs->tpc, tl);
5317- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5318+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5319 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5320- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5321+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5322 (void *) regs->u_regs[UREG_I7]);
5323 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5324 "pte[%lx] error[%lx]\n",
5325@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5326
5327 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5328 regs->tpc, tl);
5329- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5330+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5331 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5332- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5333+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5334 (void *) regs->u_regs[UREG_I7]);
5335 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5336 "pte[%lx] error[%lx]\n",
5337@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5338 fp = (unsigned long)sf->fp + STACK_BIAS;
5339 }
5340
5341- printk(" [%016lx] %pS\n", pc, (void *) pc);
5342+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5343 } while (++count < 16);
5344 }
5345
5346@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5347 return (struct reg_window *) (fp + STACK_BIAS);
5348 }
5349
5350+extern void gr_handle_kernel_exploit(void);
5351+
5352 void die_if_kernel(char *str, struct pt_regs *regs)
5353 {
5354 static int die_counter;
5355@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5356 while (rw &&
5357 count++ < 30&&
5358 is_kernel_stack(current, rw)) {
5359- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5360+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5361 (void *) rw->ins[7]);
5362
5363 rw = kernel_stack_up(rw);
5364@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5365 }
5366 user_instruction_dump ((unsigned int __user *) regs->tpc);
5367 }
5368- if (regs->tstate & TSTATE_PRIV)
5369+ if (regs->tstate & TSTATE_PRIV) {
5370+ gr_handle_kernel_exploit();
5371 do_exit(SIGKILL);
5372+ }
5373+
5374 do_exit(SIGSEGV);
5375 }
5376 EXPORT_SYMBOL(die_if_kernel);
5377diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5378index be183fe..1c8d332 100644
5379--- a/arch/sparc/kernel/una_asm_64.S
5380+++ b/arch/sparc/kernel/una_asm_64.S
5381@@ -127,7 +127,7 @@ do_int_load:
5382 wr %o5, 0x0, %asi
5383 retl
5384 mov 0, %o0
5385- .size __do_int_load, .-__do_int_load
5386+ .size do_int_load, .-do_int_load
5387
5388 .section __ex_table,"a"
5389 .word 4b, __retl_efault
5390diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5391index 3792099..2af17d8 100644
5392--- a/arch/sparc/kernel/unaligned_64.c
5393+++ b/arch/sparc/kernel/unaligned_64.c
5394@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5395 if (count < 5) {
5396 last_time = jiffies;
5397 count++;
5398- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5399+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5400 regs->tpc, (void *) regs->tpc);
5401 }
5402 }
5403diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5404index e75faf0..24f12f9 100644
5405--- a/arch/sparc/lib/Makefile
5406+++ b/arch/sparc/lib/Makefile
5407@@ -2,7 +2,7 @@
5408 #
5409
5410 asflags-y := -ansi -DST_DIV0=0x02
5411-ccflags-y := -Werror
5412+#ccflags-y := -Werror
5413
5414 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5415 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5416diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5417index 0268210..f0291ca 100644
5418--- a/arch/sparc/lib/atomic_64.S
5419+++ b/arch/sparc/lib/atomic_64.S
5420@@ -18,7 +18,12 @@
5421 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5422 BACKOFF_SETUP(%o2)
5423 1: lduw [%o1], %g1
5424- add %g1, %o0, %g7
5425+ addcc %g1, %o0, %g7
5426+
5427+#ifdef CONFIG_PAX_REFCOUNT
5428+ tvs %icc, 6
5429+#endif
5430+
5431 cas [%o1], %g1, %g7
5432 cmp %g1, %g7
5433 bne,pn %icc, 2f
5434@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5435 2: BACKOFF_SPIN(%o2, %o3, 1b)
5436 .size atomic_add, .-atomic_add
5437
5438+ .globl atomic_add_unchecked
5439+ .type atomic_add_unchecked,#function
5440+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5441+ BACKOFF_SETUP(%o2)
5442+1: lduw [%o1], %g1
5443+ add %g1, %o0, %g7
5444+ cas [%o1], %g1, %g7
5445+ cmp %g1, %g7
5446+ bne,pn %icc, 2f
5447+ nop
5448+ retl
5449+ nop
5450+2: BACKOFF_SPIN(%o2, %o3, 1b)
5451+ .size atomic_add_unchecked, .-atomic_add_unchecked
5452+
5453 .globl atomic_sub
5454 .type atomic_sub,#function
5455 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5456 BACKOFF_SETUP(%o2)
5457 1: lduw [%o1], %g1
5458- sub %g1, %o0, %g7
5459+ subcc %g1, %o0, %g7
5460+
5461+#ifdef CONFIG_PAX_REFCOUNT
5462+ tvs %icc, 6
5463+#endif
5464+
5465 cas [%o1], %g1, %g7
5466 cmp %g1, %g7
5467 bne,pn %icc, 2f
5468@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5469 2: BACKOFF_SPIN(%o2, %o3, 1b)
5470 .size atomic_sub, .-atomic_sub
5471
5472+ .globl atomic_sub_unchecked
5473+ .type atomic_sub_unchecked,#function
5474+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5475+ BACKOFF_SETUP(%o2)
5476+1: lduw [%o1], %g1
5477+ sub %g1, %o0, %g7
5478+ cas [%o1], %g1, %g7
5479+ cmp %g1, %g7
5480+ bne,pn %icc, 2f
5481+ nop
5482+ retl
5483+ nop
5484+2: BACKOFF_SPIN(%o2, %o3, 1b)
5485+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5486+
5487 .globl atomic_add_ret
5488 .type atomic_add_ret,#function
5489 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5490 BACKOFF_SETUP(%o2)
5491 1: lduw [%o1], %g1
5492- add %g1, %o0, %g7
5493+ addcc %g1, %o0, %g7
5494+
5495+#ifdef CONFIG_PAX_REFCOUNT
5496+ tvs %icc, 6
5497+#endif
5498+
5499 cas [%o1], %g1, %g7
5500 cmp %g1, %g7
5501 bne,pn %icc, 2f
5502@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5503 2: BACKOFF_SPIN(%o2, %o3, 1b)
5504 .size atomic_add_ret, .-atomic_add_ret
5505
5506+ .globl atomic_add_ret_unchecked
5507+ .type atomic_add_ret_unchecked,#function
5508+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5509+ BACKOFF_SETUP(%o2)
5510+1: lduw [%o1], %g1
5511+ addcc %g1, %o0, %g7
5512+ cas [%o1], %g1, %g7
5513+ cmp %g1, %g7
5514+ bne,pn %icc, 2f
5515+ add %g7, %o0, %g7
5516+ sra %g7, 0, %o0
5517+ retl
5518+ nop
5519+2: BACKOFF_SPIN(%o2, %o3, 1b)
5520+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5521+
5522 .globl atomic_sub_ret
5523 .type atomic_sub_ret,#function
5524 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5525 BACKOFF_SETUP(%o2)
5526 1: lduw [%o1], %g1
5527- sub %g1, %o0, %g7
5528+ subcc %g1, %o0, %g7
5529+
5530+#ifdef CONFIG_PAX_REFCOUNT
5531+ tvs %icc, 6
5532+#endif
5533+
5534 cas [%o1], %g1, %g7
5535 cmp %g1, %g7
5536 bne,pn %icc, 2f
5537@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5538 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5539 BACKOFF_SETUP(%o2)
5540 1: ldx [%o1], %g1
5541- add %g1, %o0, %g7
5542+ addcc %g1, %o0, %g7
5543+
5544+#ifdef CONFIG_PAX_REFCOUNT
5545+ tvs %xcc, 6
5546+#endif
5547+
5548 casx [%o1], %g1, %g7
5549 cmp %g1, %g7
5550 bne,pn %xcc, 2f
5551@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5552 2: BACKOFF_SPIN(%o2, %o3, 1b)
5553 .size atomic64_add, .-atomic64_add
5554
5555+ .globl atomic64_add_unchecked
5556+ .type atomic64_add_unchecked,#function
5557+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5558+ BACKOFF_SETUP(%o2)
5559+1: ldx [%o1], %g1
5560+ addcc %g1, %o0, %g7
5561+ casx [%o1], %g1, %g7
5562+ cmp %g1, %g7
5563+ bne,pn %xcc, 2f
5564+ nop
5565+ retl
5566+ nop
5567+2: BACKOFF_SPIN(%o2, %o3, 1b)
5568+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5569+
5570 .globl atomic64_sub
5571 .type atomic64_sub,#function
5572 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5573 BACKOFF_SETUP(%o2)
5574 1: ldx [%o1], %g1
5575- sub %g1, %o0, %g7
5576+ subcc %g1, %o0, %g7
5577+
5578+#ifdef CONFIG_PAX_REFCOUNT
5579+ tvs %xcc, 6
5580+#endif
5581+
5582 casx [%o1], %g1, %g7
5583 cmp %g1, %g7
5584 bne,pn %xcc, 2f
5585@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5586 2: BACKOFF_SPIN(%o2, %o3, 1b)
5587 .size atomic64_sub, .-atomic64_sub
5588
5589+ .globl atomic64_sub_unchecked
5590+ .type atomic64_sub_unchecked,#function
5591+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5592+ BACKOFF_SETUP(%o2)
5593+1: ldx [%o1], %g1
5594+ subcc %g1, %o0, %g7
5595+ casx [%o1], %g1, %g7
5596+ cmp %g1, %g7
5597+ bne,pn %xcc, 2f
5598+ nop
5599+ retl
5600+ nop
5601+2: BACKOFF_SPIN(%o2, %o3, 1b)
5602+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5603+
5604 .globl atomic64_add_ret
5605 .type atomic64_add_ret,#function
5606 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5607 BACKOFF_SETUP(%o2)
5608 1: ldx [%o1], %g1
5609- add %g1, %o0, %g7
5610+ addcc %g1, %o0, %g7
5611+
5612+#ifdef CONFIG_PAX_REFCOUNT
5613+ tvs %xcc, 6
5614+#endif
5615+
5616 casx [%o1], %g1, %g7
5617 cmp %g1, %g7
5618 bne,pn %xcc, 2f
5619@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5620 2: BACKOFF_SPIN(%o2, %o3, 1b)
5621 .size atomic64_add_ret, .-atomic64_add_ret
5622
5623+ .globl atomic64_add_ret_unchecked
5624+ .type atomic64_add_ret_unchecked,#function
5625+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5626+ BACKOFF_SETUP(%o2)
5627+1: ldx [%o1], %g1
5628+ addcc %g1, %o0, %g7
5629+ casx [%o1], %g1, %g7
5630+ cmp %g1, %g7
5631+ bne,pn %xcc, 2f
5632+ add %g7, %o0, %g7
5633+ mov %g7, %o0
5634+ retl
5635+ nop
5636+2: BACKOFF_SPIN(%o2, %o3, 1b)
5637+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5638+
5639 .globl atomic64_sub_ret
5640 .type atomic64_sub_ret,#function
5641 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5642 BACKOFF_SETUP(%o2)
5643 1: ldx [%o1], %g1
5644- sub %g1, %o0, %g7
5645+ subcc %g1, %o0, %g7
5646+
5647+#ifdef CONFIG_PAX_REFCOUNT
5648+ tvs %xcc, 6
5649+#endif
5650+
5651 casx [%o1], %g1, %g7
5652 cmp %g1, %g7
5653 bne,pn %xcc, 2f
5654diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5655index 704b126..2e79d76 100644
5656--- a/arch/sparc/lib/ksyms.c
5657+++ b/arch/sparc/lib/ksyms.c
5658@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5659
5660 /* Atomic counter implementation. */
5661 EXPORT_SYMBOL(atomic_add);
5662+EXPORT_SYMBOL(atomic_add_unchecked);
5663 EXPORT_SYMBOL(atomic_add_ret);
5664+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5665 EXPORT_SYMBOL(atomic_sub);
5666+EXPORT_SYMBOL(atomic_sub_unchecked);
5667 EXPORT_SYMBOL(atomic_sub_ret);
5668 EXPORT_SYMBOL(atomic64_add);
5669+EXPORT_SYMBOL(atomic64_add_unchecked);
5670 EXPORT_SYMBOL(atomic64_add_ret);
5671+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5672 EXPORT_SYMBOL(atomic64_sub);
5673+EXPORT_SYMBOL(atomic64_sub_unchecked);
5674 EXPORT_SYMBOL(atomic64_sub_ret);
5675
5676 /* Atomic bit operations. */
5677diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5678index 91a7d29..ce75c29 100644
5679--- a/arch/sparc/lib/rwsem_64.S
5680+++ b/arch/sparc/lib/rwsem_64.S
5681@@ -11,7 +11,12 @@
5682 .globl __down_read
5683 __down_read:
5684 1: lduw [%o0], %g1
5685- add %g1, 1, %g7
5686+ addcc %g1, 1, %g7
5687+
5688+#ifdef CONFIG_PAX_REFCOUNT
5689+ tvs %icc, 6
5690+#endif
5691+
5692 cas [%o0], %g1, %g7
5693 cmp %g1, %g7
5694 bne,pn %icc, 1b
5695@@ -33,7 +38,12 @@ __down_read:
5696 .globl __down_read_trylock
5697 __down_read_trylock:
5698 1: lduw [%o0], %g1
5699- add %g1, 1, %g7
5700+ addcc %g1, 1, %g7
5701+
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ tvs %icc, 6
5704+#endif
5705+
5706 cmp %g7, 0
5707 bl,pn %icc, 2f
5708 mov 0, %o1
5709@@ -51,7 +61,12 @@ __down_write:
5710 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5711 1:
5712 lduw [%o0], %g3
5713- add %g3, %g1, %g7
5714+ addcc %g3, %g1, %g7
5715+
5716+#ifdef CONFIG_PAX_REFCOUNT
5717+ tvs %icc, 6
5718+#endif
5719+
5720 cas [%o0], %g3, %g7
5721 cmp %g3, %g7
5722 bne,pn %icc, 1b
5723@@ -77,7 +92,12 @@ __down_write_trylock:
5724 cmp %g3, 0
5725 bne,pn %icc, 2f
5726 mov 0, %o1
5727- add %g3, %g1, %g7
5728+ addcc %g3, %g1, %g7
5729+
5730+#ifdef CONFIG_PAX_REFCOUNT
5731+ tvs %icc, 6
5732+#endif
5733+
5734 cas [%o0], %g3, %g7
5735 cmp %g3, %g7
5736 bne,pn %icc, 1b
5737@@ -90,7 +110,12 @@ __down_write_trylock:
5738 __up_read:
5739 1:
5740 lduw [%o0], %g1
5741- sub %g1, 1, %g7
5742+ subcc %g1, 1, %g7
5743+
5744+#ifdef CONFIG_PAX_REFCOUNT
5745+ tvs %icc, 6
5746+#endif
5747+
5748 cas [%o0], %g1, %g7
5749 cmp %g1, %g7
5750 bne,pn %icc, 1b
5751@@ -118,7 +143,12 @@ __up_write:
5752 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5753 1:
5754 lduw [%o0], %g3
5755- sub %g3, %g1, %g7
5756+ subcc %g3, %g1, %g7
5757+
5758+#ifdef CONFIG_PAX_REFCOUNT
5759+ tvs %icc, 6
5760+#endif
5761+
5762 cas [%o0], %g3, %g7
5763 cmp %g3, %g7
5764 bne,pn %icc, 1b
5765@@ -143,7 +173,12 @@ __downgrade_write:
5766 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5767 1:
5768 lduw [%o0], %g3
5769- sub %g3, %g1, %g7
5770+ subcc %g3, %g1, %g7
5771+
5772+#ifdef CONFIG_PAX_REFCOUNT
5773+ tvs %icc, 6
5774+#endif
5775+
5776 cas [%o0], %g3, %g7
5777 cmp %g3, %g7
5778 bne,pn %icc, 1b
5779diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5780index 79836a7..62f47a2 100644
5781--- a/arch/sparc/mm/Makefile
5782+++ b/arch/sparc/mm/Makefile
5783@@ -2,7 +2,7 @@
5784 #
5785
5786 asflags-y := -ansi
5787-ccflags-y := -Werror
5788+#ccflags-y := -Werror
5789
5790 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5791 obj-y += fault_$(BITS).o
5792diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5793index b99f81c..3453e93 100644
5794--- a/arch/sparc/mm/fault_32.c
5795+++ b/arch/sparc/mm/fault_32.c
5796@@ -21,6 +21,9 @@
5797 #include <linux/interrupt.h>
5798 #include <linux/module.h>
5799 #include <linux/kdebug.h>
5800+#include <linux/slab.h>
5801+#include <linux/pagemap.h>
5802+#include <linux/compiler.h>
5803
5804 #include <asm/system.h>
5805 #include <asm/page.h>
5806@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5807 return safe_compute_effective_address(regs, insn);
5808 }
5809
5810+#ifdef CONFIG_PAX_PAGEEXEC
5811+#ifdef CONFIG_PAX_DLRESOLVE
5812+static void pax_emuplt_close(struct vm_area_struct *vma)
5813+{
5814+ vma->vm_mm->call_dl_resolve = 0UL;
5815+}
5816+
5817+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5818+{
5819+ unsigned int *kaddr;
5820+
5821+ vmf->page = alloc_page(GFP_HIGHUSER);
5822+ if (!vmf->page)
5823+ return VM_FAULT_OOM;
5824+
5825+ kaddr = kmap(vmf->page);
5826+ memset(kaddr, 0, PAGE_SIZE);
5827+ kaddr[0] = 0x9DE3BFA8U; /* save */
5828+ flush_dcache_page(vmf->page);
5829+ kunmap(vmf->page);
5830+ return VM_FAULT_MAJOR;
5831+}
5832+
5833+static const struct vm_operations_struct pax_vm_ops = {
5834+ .close = pax_emuplt_close,
5835+ .fault = pax_emuplt_fault
5836+};
5837+
5838+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5839+{
5840+ int ret;
5841+
5842+ vma->vm_mm = current->mm;
5843+ vma->vm_start = addr;
5844+ vma->vm_end = addr + PAGE_SIZE;
5845+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5846+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5847+ vma->vm_ops = &pax_vm_ops;
5848+
5849+ ret = insert_vm_struct(current->mm, vma);
5850+ if (ret)
5851+ return ret;
5852+
5853+ ++current->mm->total_vm;
5854+ return 0;
5855+}
5856+#endif
5857+
5858+/*
5859+ * PaX: decide what to do with offenders (regs->pc = fault address)
5860+ *
5861+ * returns 1 when task should be killed
5862+ * 2 when patched PLT trampoline was detected
5863+ * 3 when unpatched PLT trampoline was detected
5864+ */
5865+static int pax_handle_fetch_fault(struct pt_regs *regs)
5866+{
5867+
5868+#ifdef CONFIG_PAX_EMUPLT
5869+ int err;
5870+
5871+ do { /* PaX: patched PLT emulation #1 */
5872+ unsigned int sethi1, sethi2, jmpl;
5873+
5874+ err = get_user(sethi1, (unsigned int *)regs->pc);
5875+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5876+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5877+
5878+ if (err)
5879+ break;
5880+
5881+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5882+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5883+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5884+ {
5885+ unsigned int addr;
5886+
5887+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5888+ addr = regs->u_regs[UREG_G1];
5889+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5890+ regs->pc = addr;
5891+ regs->npc = addr+4;
5892+ return 2;
5893+ }
5894+ } while (0);
5895+
5896+ { /* PaX: patched PLT emulation #2 */
5897+ unsigned int ba;
5898+
5899+ err = get_user(ba, (unsigned int *)regs->pc);
5900+
5901+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5902+ unsigned int addr;
5903+
5904+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5905+ regs->pc = addr;
5906+ regs->npc = addr+4;
5907+ return 2;
5908+ }
5909+ }
5910+
5911+ do { /* PaX: patched PLT emulation #3 */
5912+ unsigned int sethi, jmpl, nop;
5913+
5914+ err = get_user(sethi, (unsigned int *)regs->pc);
5915+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5916+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5917+
5918+ if (err)
5919+ break;
5920+
5921+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5922+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5923+ nop == 0x01000000U)
5924+ {
5925+ unsigned int addr;
5926+
5927+ addr = (sethi & 0x003FFFFFU) << 10;
5928+ regs->u_regs[UREG_G1] = addr;
5929+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5930+ regs->pc = addr;
5931+ regs->npc = addr+4;
5932+ return 2;
5933+ }
5934+ } while (0);
5935+
5936+ do { /* PaX: unpatched PLT emulation step 1 */
5937+ unsigned int sethi, ba, nop;
5938+
5939+ err = get_user(sethi, (unsigned int *)regs->pc);
5940+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5941+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5942+
5943+ if (err)
5944+ break;
5945+
5946+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5947+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5948+ nop == 0x01000000U)
5949+ {
5950+ unsigned int addr, save, call;
5951+
5952+ if ((ba & 0xFFC00000U) == 0x30800000U)
5953+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5954+ else
5955+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5956+
5957+ err = get_user(save, (unsigned int *)addr);
5958+ err |= get_user(call, (unsigned int *)(addr+4));
5959+ err |= get_user(nop, (unsigned int *)(addr+8));
5960+ if (err)
5961+ break;
5962+
5963+#ifdef CONFIG_PAX_DLRESOLVE
5964+ if (save == 0x9DE3BFA8U &&
5965+ (call & 0xC0000000U) == 0x40000000U &&
5966+ nop == 0x01000000U)
5967+ {
5968+ struct vm_area_struct *vma;
5969+ unsigned long call_dl_resolve;
5970+
5971+ down_read(&current->mm->mmap_sem);
5972+ call_dl_resolve = current->mm->call_dl_resolve;
5973+ up_read(&current->mm->mmap_sem);
5974+ if (likely(call_dl_resolve))
5975+ goto emulate;
5976+
5977+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5978+
5979+ down_write(&current->mm->mmap_sem);
5980+ if (current->mm->call_dl_resolve) {
5981+ call_dl_resolve = current->mm->call_dl_resolve;
5982+ up_write(&current->mm->mmap_sem);
5983+ if (vma)
5984+ kmem_cache_free(vm_area_cachep, vma);
5985+ goto emulate;
5986+ }
5987+
5988+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5989+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5990+ up_write(&current->mm->mmap_sem);
5991+ if (vma)
5992+ kmem_cache_free(vm_area_cachep, vma);
5993+ return 1;
5994+ }
5995+
5996+ if (pax_insert_vma(vma, call_dl_resolve)) {
5997+ up_write(&current->mm->mmap_sem);
5998+ kmem_cache_free(vm_area_cachep, vma);
5999+ return 1;
6000+ }
6001+
6002+ current->mm->call_dl_resolve = call_dl_resolve;
6003+ up_write(&current->mm->mmap_sem);
6004+
6005+emulate:
6006+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6007+ regs->pc = call_dl_resolve;
6008+ regs->npc = addr+4;
6009+ return 3;
6010+ }
6011+#endif
6012+
6013+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6014+ if ((save & 0xFFC00000U) == 0x05000000U &&
6015+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6016+ nop == 0x01000000U)
6017+ {
6018+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6019+ regs->u_regs[UREG_G2] = addr + 4;
6020+ addr = (save & 0x003FFFFFU) << 10;
6021+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6022+ regs->pc = addr;
6023+ regs->npc = addr+4;
6024+ return 3;
6025+ }
6026+ }
6027+ } while (0);
6028+
6029+ do { /* PaX: unpatched PLT emulation step 2 */
6030+ unsigned int save, call, nop;
6031+
6032+ err = get_user(save, (unsigned int *)(regs->pc-4));
6033+ err |= get_user(call, (unsigned int *)regs->pc);
6034+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6035+ if (err)
6036+ break;
6037+
6038+ if (save == 0x9DE3BFA8U &&
6039+ (call & 0xC0000000U) == 0x40000000U &&
6040+ nop == 0x01000000U)
6041+ {
6042+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6043+
6044+ regs->u_regs[UREG_RETPC] = regs->pc;
6045+ regs->pc = dl_resolve;
6046+ regs->npc = dl_resolve+4;
6047+ return 3;
6048+ }
6049+ } while (0);
6050+#endif
6051+
6052+ return 1;
6053+}
6054+
6055+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6056+{
6057+ unsigned long i;
6058+
6059+ printk(KERN_ERR "PAX: bytes at PC: ");
6060+ for (i = 0; i < 8; i++) {
6061+ unsigned int c;
6062+ if (get_user(c, (unsigned int *)pc+i))
6063+ printk(KERN_CONT "???????? ");
6064+ else
6065+ printk(KERN_CONT "%08x ", c);
6066+ }
6067+ printk("\n");
6068+}
6069+#endif
6070+
6071 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6072 unsigned long address)
6073 {
6074@@ -231,6 +495,24 @@ good_area:
6075 if(!(vma->vm_flags & VM_WRITE))
6076 goto bad_area;
6077 } else {
6078+
6079+#ifdef CONFIG_PAX_PAGEEXEC
6080+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6081+ up_read(&mm->mmap_sem);
6082+ switch (pax_handle_fetch_fault(regs)) {
6083+
6084+#ifdef CONFIG_PAX_EMUPLT
6085+ case 2:
6086+ case 3:
6087+ return;
6088+#endif
6089+
6090+ }
6091+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6092+ do_group_exit(SIGKILL);
6093+ }
6094+#endif
6095+
6096 /* Allow reads even for write-only mappings */
6097 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6098 goto bad_area;
6099diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6100index 43b0da9..a0b78f9 100644
6101--- a/arch/sparc/mm/fault_64.c
6102+++ b/arch/sparc/mm/fault_64.c
6103@@ -20,6 +20,9 @@
6104 #include <linux/kprobes.h>
6105 #include <linux/kdebug.h>
6106 #include <linux/percpu.h>
6107+#include <linux/slab.h>
6108+#include <linux/pagemap.h>
6109+#include <linux/compiler.h>
6110
6111 #include <asm/page.h>
6112 #include <asm/pgtable.h>
6113@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6114 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6115 regs->tpc);
6116 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6117- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6118+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6119 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6120 dump_stack();
6121 unhandled_fault(regs->tpc, current, regs);
6122@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6123 show_regs(regs);
6124 }
6125
6126+#ifdef CONFIG_PAX_PAGEEXEC
6127+#ifdef CONFIG_PAX_DLRESOLVE
6128+static void pax_emuplt_close(struct vm_area_struct *vma)
6129+{
6130+ vma->vm_mm->call_dl_resolve = 0UL;
6131+}
6132+
6133+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6134+{
6135+ unsigned int *kaddr;
6136+
6137+ vmf->page = alloc_page(GFP_HIGHUSER);
6138+ if (!vmf->page)
6139+ return VM_FAULT_OOM;
6140+
6141+ kaddr = kmap(vmf->page);
6142+ memset(kaddr, 0, PAGE_SIZE);
6143+ kaddr[0] = 0x9DE3BFA8U; /* save */
6144+ flush_dcache_page(vmf->page);
6145+ kunmap(vmf->page);
6146+ return VM_FAULT_MAJOR;
6147+}
6148+
6149+static const struct vm_operations_struct pax_vm_ops = {
6150+ .close = pax_emuplt_close,
6151+ .fault = pax_emuplt_fault
6152+};
6153+
6154+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6155+{
6156+ int ret;
6157+
6158+ vma->vm_mm = current->mm;
6159+ vma->vm_start = addr;
6160+ vma->vm_end = addr + PAGE_SIZE;
6161+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6162+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6163+ vma->vm_ops = &pax_vm_ops;
6164+
6165+ ret = insert_vm_struct(current->mm, vma);
6166+ if (ret)
6167+ return ret;
6168+
6169+ ++current->mm->total_vm;
6170+ return 0;
6171+}
6172+#endif
6173+
6174+/*
6175+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6176+ *
6177+ * returns 1 when task should be killed
6178+ * 2 when patched PLT trampoline was detected
6179+ * 3 when unpatched PLT trampoline was detected
6180+ */
6181+static int pax_handle_fetch_fault(struct pt_regs *regs)
6182+{
6183+
6184+#ifdef CONFIG_PAX_EMUPLT
6185+ int err;
6186+
6187+ do { /* PaX: patched PLT emulation #1 */
6188+ unsigned int sethi1, sethi2, jmpl;
6189+
6190+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6191+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6192+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6193+
6194+ if (err)
6195+ break;
6196+
6197+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6198+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6199+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6200+ {
6201+ unsigned long addr;
6202+
6203+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6204+ addr = regs->u_regs[UREG_G1];
6205+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6206+
6207+ if (test_thread_flag(TIF_32BIT))
6208+ addr &= 0xFFFFFFFFUL;
6209+
6210+ regs->tpc = addr;
6211+ regs->tnpc = addr+4;
6212+ return 2;
6213+ }
6214+ } while (0);
6215+
6216+ { /* PaX: patched PLT emulation #2 */
6217+ unsigned int ba;
6218+
6219+ err = get_user(ba, (unsigned int *)regs->tpc);
6220+
6221+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6222+ unsigned long addr;
6223+
6224+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6225+
6226+ if (test_thread_flag(TIF_32BIT))
6227+ addr &= 0xFFFFFFFFUL;
6228+
6229+ regs->tpc = addr;
6230+ regs->tnpc = addr+4;
6231+ return 2;
6232+ }
6233+ }
6234+
6235+ do { /* PaX: patched PLT emulation #3 */
6236+ unsigned int sethi, jmpl, nop;
6237+
6238+ err = get_user(sethi, (unsigned int *)regs->tpc);
6239+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6240+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6241+
6242+ if (err)
6243+ break;
6244+
6245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6246+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6247+ nop == 0x01000000U)
6248+ {
6249+ unsigned long addr;
6250+
6251+ addr = (sethi & 0x003FFFFFU) << 10;
6252+ regs->u_regs[UREG_G1] = addr;
6253+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6254+
6255+ if (test_thread_flag(TIF_32BIT))
6256+ addr &= 0xFFFFFFFFUL;
6257+
6258+ regs->tpc = addr;
6259+ regs->tnpc = addr+4;
6260+ return 2;
6261+ }
6262+ } while (0);
6263+
6264+ do { /* PaX: patched PLT emulation #4 */
6265+ unsigned int sethi, mov1, call, mov2;
6266+
6267+ err = get_user(sethi, (unsigned int *)regs->tpc);
6268+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6269+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6270+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6271+
6272+ if (err)
6273+ break;
6274+
6275+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6276+ mov1 == 0x8210000FU &&
6277+ (call & 0xC0000000U) == 0x40000000U &&
6278+ mov2 == 0x9E100001U)
6279+ {
6280+ unsigned long addr;
6281+
6282+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6283+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6284+
6285+ if (test_thread_flag(TIF_32BIT))
6286+ addr &= 0xFFFFFFFFUL;
6287+
6288+ regs->tpc = addr;
6289+ regs->tnpc = addr+4;
6290+ return 2;
6291+ }
6292+ } while (0);
6293+
6294+ do { /* PaX: patched PLT emulation #5 */
6295+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6296+
6297+ err = get_user(sethi, (unsigned int *)regs->tpc);
6298+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6299+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6300+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6301+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6302+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6303+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6304+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6305+
6306+ if (err)
6307+ break;
6308+
6309+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6310+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6311+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6312+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6313+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6314+ sllx == 0x83287020U &&
6315+ jmpl == 0x81C04005U &&
6316+ nop == 0x01000000U)
6317+ {
6318+ unsigned long addr;
6319+
6320+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6321+ regs->u_regs[UREG_G1] <<= 32;
6322+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6323+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6324+ regs->tpc = addr;
6325+ regs->tnpc = addr+4;
6326+ return 2;
6327+ }
6328+ } while (0);
6329+
6330+ do { /* PaX: patched PLT emulation #6 */
6331+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6332+
6333+ err = get_user(sethi, (unsigned int *)regs->tpc);
6334+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6335+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6336+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6337+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6338+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6339+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6340+
6341+ if (err)
6342+ break;
6343+
6344+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6345+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6346+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6347+ sllx == 0x83287020U &&
6348+ (or & 0xFFFFE000U) == 0x8A116000U &&
6349+ jmpl == 0x81C04005U &&
6350+ nop == 0x01000000U)
6351+ {
6352+ unsigned long addr;
6353+
6354+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6355+ regs->u_regs[UREG_G1] <<= 32;
6356+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6357+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6358+ regs->tpc = addr;
6359+ regs->tnpc = addr+4;
6360+ return 2;
6361+ }
6362+ } while (0);
6363+
6364+ do { /* PaX: unpatched PLT emulation step 1 */
6365+ unsigned int sethi, ba, nop;
6366+
6367+ err = get_user(sethi, (unsigned int *)regs->tpc);
6368+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6369+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6370+
6371+ if (err)
6372+ break;
6373+
6374+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6375+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6376+ nop == 0x01000000U)
6377+ {
6378+ unsigned long addr;
6379+ unsigned int save, call;
6380+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6381+
6382+ if ((ba & 0xFFC00000U) == 0x30800000U)
6383+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6384+ else
6385+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6386+
6387+ if (test_thread_flag(TIF_32BIT))
6388+ addr &= 0xFFFFFFFFUL;
6389+
6390+ err = get_user(save, (unsigned int *)addr);
6391+ err |= get_user(call, (unsigned int *)(addr+4));
6392+ err |= get_user(nop, (unsigned int *)(addr+8));
6393+ if (err)
6394+ break;
6395+
6396+#ifdef CONFIG_PAX_DLRESOLVE
6397+ if (save == 0x9DE3BFA8U &&
6398+ (call & 0xC0000000U) == 0x40000000U &&
6399+ nop == 0x01000000U)
6400+ {
6401+ struct vm_area_struct *vma;
6402+ unsigned long call_dl_resolve;
6403+
6404+ down_read(&current->mm->mmap_sem);
6405+ call_dl_resolve = current->mm->call_dl_resolve;
6406+ up_read(&current->mm->mmap_sem);
6407+ if (likely(call_dl_resolve))
6408+ goto emulate;
6409+
6410+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6411+
6412+ down_write(&current->mm->mmap_sem);
6413+ if (current->mm->call_dl_resolve) {
6414+ call_dl_resolve = current->mm->call_dl_resolve;
6415+ up_write(&current->mm->mmap_sem);
6416+ if (vma)
6417+ kmem_cache_free(vm_area_cachep, vma);
6418+ goto emulate;
6419+ }
6420+
6421+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6422+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6423+ up_write(&current->mm->mmap_sem);
6424+ if (vma)
6425+ kmem_cache_free(vm_area_cachep, vma);
6426+ return 1;
6427+ }
6428+
6429+ if (pax_insert_vma(vma, call_dl_resolve)) {
6430+ up_write(&current->mm->mmap_sem);
6431+ kmem_cache_free(vm_area_cachep, vma);
6432+ return 1;
6433+ }
6434+
6435+ current->mm->call_dl_resolve = call_dl_resolve;
6436+ up_write(&current->mm->mmap_sem);
6437+
6438+emulate:
6439+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6440+ regs->tpc = call_dl_resolve;
6441+ regs->tnpc = addr+4;
6442+ return 3;
6443+ }
6444+#endif
6445+
6446+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6447+ if ((save & 0xFFC00000U) == 0x05000000U &&
6448+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6449+ nop == 0x01000000U)
6450+ {
6451+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6452+ regs->u_regs[UREG_G2] = addr + 4;
6453+ addr = (save & 0x003FFFFFU) << 10;
6454+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6455+
6456+ if (test_thread_flag(TIF_32BIT))
6457+ addr &= 0xFFFFFFFFUL;
6458+
6459+ regs->tpc = addr;
6460+ regs->tnpc = addr+4;
6461+ return 3;
6462+ }
6463+
6464+ /* PaX: 64-bit PLT stub */
6465+ err = get_user(sethi1, (unsigned int *)addr);
6466+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6467+ err |= get_user(or1, (unsigned int *)(addr+8));
6468+ err |= get_user(or2, (unsigned int *)(addr+12));
6469+ err |= get_user(sllx, (unsigned int *)(addr+16));
6470+ err |= get_user(add, (unsigned int *)(addr+20));
6471+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6472+ err |= get_user(nop, (unsigned int *)(addr+28));
6473+ if (err)
6474+ break;
6475+
6476+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6477+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6478+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6479+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6480+ sllx == 0x89293020U &&
6481+ add == 0x8A010005U &&
6482+ jmpl == 0x89C14000U &&
6483+ nop == 0x01000000U)
6484+ {
6485+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6486+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6487+ regs->u_regs[UREG_G4] <<= 32;
6488+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6489+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6490+ regs->u_regs[UREG_G4] = addr + 24;
6491+ addr = regs->u_regs[UREG_G5];
6492+ regs->tpc = addr;
6493+ regs->tnpc = addr+4;
6494+ return 3;
6495+ }
6496+ }
6497+ } while (0);
6498+
6499+#ifdef CONFIG_PAX_DLRESOLVE
6500+ do { /* PaX: unpatched PLT emulation step 2 */
6501+ unsigned int save, call, nop;
6502+
6503+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6504+ err |= get_user(call, (unsigned int *)regs->tpc);
6505+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6506+ if (err)
6507+ break;
6508+
6509+ if (save == 0x9DE3BFA8U &&
6510+ (call & 0xC0000000U) == 0x40000000U &&
6511+ nop == 0x01000000U)
6512+ {
6513+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6514+
6515+ if (test_thread_flag(TIF_32BIT))
6516+ dl_resolve &= 0xFFFFFFFFUL;
6517+
6518+ regs->u_regs[UREG_RETPC] = regs->tpc;
6519+ regs->tpc = dl_resolve;
6520+ regs->tnpc = dl_resolve+4;
6521+ return 3;
6522+ }
6523+ } while (0);
6524+#endif
6525+
6526+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6527+ unsigned int sethi, ba, nop;
6528+
6529+ err = get_user(sethi, (unsigned int *)regs->tpc);
6530+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6531+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6532+
6533+ if (err)
6534+ break;
6535+
6536+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6537+ (ba & 0xFFF00000U) == 0x30600000U &&
6538+ nop == 0x01000000U)
6539+ {
6540+ unsigned long addr;
6541+
6542+ addr = (sethi & 0x003FFFFFU) << 10;
6543+ regs->u_regs[UREG_G1] = addr;
6544+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6545+
6546+ if (test_thread_flag(TIF_32BIT))
6547+ addr &= 0xFFFFFFFFUL;
6548+
6549+ regs->tpc = addr;
6550+ regs->tnpc = addr+4;
6551+ return 2;
6552+ }
6553+ } while (0);
6554+
6555+#endif
6556+
6557+ return 1;
6558+}
6559+
6560+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6561+{
6562+ unsigned long i;
6563+
6564+ printk(KERN_ERR "PAX: bytes at PC: ");
6565+ for (i = 0; i < 8; i++) {
6566+ unsigned int c;
6567+ if (get_user(c, (unsigned int *)pc+i))
6568+ printk(KERN_CONT "???????? ");
6569+ else
6570+ printk(KERN_CONT "%08x ", c);
6571+ }
6572+ printk("\n");
6573+}
6574+#endif
6575+
6576 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6577 {
6578 struct mm_struct *mm = current->mm;
6579@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6580 if (!vma)
6581 goto bad_area;
6582
6583+#ifdef CONFIG_PAX_PAGEEXEC
6584+ /* PaX: detect ITLB misses on non-exec pages */
6585+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6586+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6587+ {
6588+ if (address != regs->tpc)
6589+ goto good_area;
6590+
6591+ up_read(&mm->mmap_sem);
6592+ switch (pax_handle_fetch_fault(regs)) {
6593+
6594+#ifdef CONFIG_PAX_EMUPLT
6595+ case 2:
6596+ case 3:
6597+ return;
6598+#endif
6599+
6600+ }
6601+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6602+ do_group_exit(SIGKILL);
6603+ }
6604+#endif
6605+
6606 /* Pure DTLB misses do not tell us whether the fault causing
6607 * load/store/atomic was a write or not, it only says that there
6608 * was no match. So in such a case we (carefully) read the
6609diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6610index f27d103..1b06377 100644
6611--- a/arch/sparc/mm/hugetlbpage.c
6612+++ b/arch/sparc/mm/hugetlbpage.c
6613@@ -69,7 +69,7 @@ full_search:
6614 }
6615 return -ENOMEM;
6616 }
6617- if (likely(!vma || addr + len <= vma->vm_start)) {
6618+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6619 /*
6620 * Remember the place where we stopped the search:
6621 */
6622@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6623 /* make sure it can fit in the remaining address space */
6624 if (likely(addr > len)) {
6625 vma = find_vma(mm, addr-len);
6626- if (!vma || addr <= vma->vm_start) {
6627+ if (check_heap_stack_gap(vma, addr - len, len)) {
6628 /* remember the address as a hint for next time */
6629 return (mm->free_area_cache = addr-len);
6630 }
6631@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6632 if (unlikely(mm->mmap_base < len))
6633 goto bottomup;
6634
6635- addr = (mm->mmap_base-len) & HPAGE_MASK;
6636+ addr = mm->mmap_base - len;
6637
6638 do {
6639+ addr &= HPAGE_MASK;
6640 /*
6641 * Lookup failure means no vma is above this address,
6642 * else if new region fits below vma->vm_start,
6643 * return with success:
6644 */
6645 vma = find_vma(mm, addr);
6646- if (likely(!vma || addr+len <= vma->vm_start)) {
6647+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6648 /* remember the address as a hint for next time */
6649 return (mm->free_area_cache = addr);
6650 }
6651@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6652 mm->cached_hole_size = vma->vm_start - addr;
6653
6654 /* try just below the current vma->vm_start */
6655- addr = (vma->vm_start-len) & HPAGE_MASK;
6656- } while (likely(len < vma->vm_start));
6657+ addr = skip_heap_stack_gap(vma, len);
6658+ } while (!IS_ERR_VALUE(addr));
6659
6660 bottomup:
6661 /*
6662@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6663 if (addr) {
6664 addr = ALIGN(addr, HPAGE_SIZE);
6665 vma = find_vma(mm, addr);
6666- if (task_size - len >= addr &&
6667- (!vma || addr + len <= vma->vm_start))
6668+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6669 return addr;
6670 }
6671 if (mm->get_unmapped_area == arch_get_unmapped_area)
6672diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6673index dc7c3b1..34c0070 100644
6674--- a/arch/sparc/mm/init_32.c
6675+++ b/arch/sparc/mm/init_32.c
6676@@ -317,6 +317,9 @@ extern void device_scan(void);
6677 pgprot_t PAGE_SHARED __read_mostly;
6678 EXPORT_SYMBOL(PAGE_SHARED);
6679
6680+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6681+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6682+
6683 void __init paging_init(void)
6684 {
6685 switch(sparc_cpu_model) {
6686@@ -345,17 +348,17 @@ void __init paging_init(void)
6687
6688 /* Initialize the protection map with non-constant, MMU dependent values. */
6689 protection_map[0] = PAGE_NONE;
6690- protection_map[1] = PAGE_READONLY;
6691- protection_map[2] = PAGE_COPY;
6692- protection_map[3] = PAGE_COPY;
6693+ protection_map[1] = PAGE_READONLY_NOEXEC;
6694+ protection_map[2] = PAGE_COPY_NOEXEC;
6695+ protection_map[3] = PAGE_COPY_NOEXEC;
6696 protection_map[4] = PAGE_READONLY;
6697 protection_map[5] = PAGE_READONLY;
6698 protection_map[6] = PAGE_COPY;
6699 protection_map[7] = PAGE_COPY;
6700 protection_map[8] = PAGE_NONE;
6701- protection_map[9] = PAGE_READONLY;
6702- protection_map[10] = PAGE_SHARED;
6703- protection_map[11] = PAGE_SHARED;
6704+ protection_map[9] = PAGE_READONLY_NOEXEC;
6705+ protection_map[10] = PAGE_SHARED_NOEXEC;
6706+ protection_map[11] = PAGE_SHARED_NOEXEC;
6707 protection_map[12] = PAGE_READONLY;
6708 protection_map[13] = PAGE_READONLY;
6709 protection_map[14] = PAGE_SHARED;
6710diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6711index 509b1ff..bfd7118 100644
6712--- a/arch/sparc/mm/srmmu.c
6713+++ b/arch/sparc/mm/srmmu.c
6714@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6715 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6716 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6717 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6718+
6719+#ifdef CONFIG_PAX_PAGEEXEC
6720+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6721+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6722+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6723+#endif
6724+
6725 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6726 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6727
6728diff --git a/arch/um/Makefile b/arch/um/Makefile
6729index fc633db..5e1a1c2 100644
6730--- a/arch/um/Makefile
6731+++ b/arch/um/Makefile
6732@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6733 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6734 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6735
6736+ifdef CONSTIFY_PLUGIN
6737+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6738+endif
6739+
6740 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6741
6742 #This will adjust *FLAGS accordingly to the platform.
6743diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6744index 6c03acd..a5e0215 100644
6745--- a/arch/um/include/asm/kmap_types.h
6746+++ b/arch/um/include/asm/kmap_types.h
6747@@ -23,6 +23,7 @@ enum km_type {
6748 KM_IRQ1,
6749 KM_SOFTIRQ0,
6750 KM_SOFTIRQ1,
6751+ KM_CLEARPAGE,
6752 KM_TYPE_NR
6753 };
6754
6755diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6756index 4cc9b6c..02e5029 100644
6757--- a/arch/um/include/asm/page.h
6758+++ b/arch/um/include/asm/page.h
6759@@ -14,6 +14,9 @@
6760 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6761 #define PAGE_MASK (~(PAGE_SIZE-1))
6762
6763+#define ktla_ktva(addr) (addr)
6764+#define ktva_ktla(addr) (addr)
6765+
6766 #ifndef __ASSEMBLY__
6767
6768 struct page;
6769diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6770index 4a28a15..654dc2a 100644
6771--- a/arch/um/kernel/process.c
6772+++ b/arch/um/kernel/process.c
6773@@ -393,22 +393,6 @@ int singlestepping(void * t)
6774 return 2;
6775 }
6776
6777-/*
6778- * Only x86 and x86_64 have an arch_align_stack().
6779- * All other arches have "#define arch_align_stack(x) (x)"
6780- * in their asm/system.h
6781- * As this is included in UML from asm-um/system-generic.h,
6782- * we can use it to behave as the subarch does.
6783- */
6784-#ifndef arch_align_stack
6785-unsigned long arch_align_stack(unsigned long sp)
6786-{
6787- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6788- sp -= get_random_int() % 8192;
6789- return sp & ~0xf;
6790-}
6791-#endif
6792-
6793 unsigned long get_wchan(struct task_struct *p)
6794 {
6795 unsigned long stack_page, sp, ip;
6796diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6797index d1b93c4..ae1b7fd 100644
6798--- a/arch/um/sys-i386/shared/sysdep/system.h
6799+++ b/arch/um/sys-i386/shared/sysdep/system.h
6800@@ -17,7 +17,7 @@
6801 # define AT_VECTOR_SIZE_ARCH 1
6802 #endif
6803
6804-extern unsigned long arch_align_stack(unsigned long sp);
6805+#define arch_align_stack(x) ((x) & ~0xfUL)
6806
6807 void default_idle(void);
6808
6809diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6810index 857ca0b..9a2669d 100644
6811--- a/arch/um/sys-i386/syscalls.c
6812+++ b/arch/um/sys-i386/syscalls.c
6813@@ -11,6 +11,21 @@
6814 #include "asm/uaccess.h"
6815 #include "asm/unistd.h"
6816
6817+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6818+{
6819+ unsigned long pax_task_size = TASK_SIZE;
6820+
6821+#ifdef CONFIG_PAX_SEGMEXEC
6822+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6823+ pax_task_size = SEGMEXEC_TASK_SIZE;
6824+#endif
6825+
6826+ if (len > pax_task_size || addr > pax_task_size - len)
6827+ return -EINVAL;
6828+
6829+ return 0;
6830+}
6831+
6832 /*
6833 * Perform the select(nd, in, out, ex, tv) and mmap() system
6834 * calls. Linux/i386 didn't use to be able to handle more than
6835diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6836index d1b93c4..ae1b7fd 100644
6837--- a/arch/um/sys-x86_64/shared/sysdep/system.h
6838+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6839@@ -17,7 +17,7 @@
6840 # define AT_VECTOR_SIZE_ARCH 1
6841 #endif
6842
6843-extern unsigned long arch_align_stack(unsigned long sp);
6844+#define arch_align_stack(x) ((x) & ~0xfUL)
6845
6846 void default_idle(void);
6847
6848diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6849index 73ae02a..f932de5 100644
6850--- a/arch/x86/Kconfig
6851+++ b/arch/x86/Kconfig
6852@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6853
6854 config X86_32_LAZY_GS
6855 def_bool y
6856- depends on X86_32 && !CC_STACKPROTECTOR
6857+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6858
6859 config KTIME_SCALAR
6860 def_bool X86_32
6861@@ -1008,7 +1008,7 @@ choice
6862
6863 config NOHIGHMEM
6864 bool "off"
6865- depends on !X86_NUMAQ
6866+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6867 ---help---
6868 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6869 However, the address space of 32-bit x86 processors is only 4
6870@@ -1045,7 +1045,7 @@ config NOHIGHMEM
6871
6872 config HIGHMEM4G
6873 bool "4GB"
6874- depends on !X86_NUMAQ
6875+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6876 ---help---
6877 Select this if you have a 32-bit processor and between 1 and 4
6878 gigabytes of physical RAM.
6879@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6880 hex
6881 default 0xB0000000 if VMSPLIT_3G_OPT
6882 default 0x80000000 if VMSPLIT_2G
6883- default 0x78000000 if VMSPLIT_2G_OPT
6884+ default 0x70000000 if VMSPLIT_2G_OPT
6885 default 0x40000000 if VMSPLIT_1G
6886 default 0xC0000000
6887 depends on X86_32
6888@@ -1460,6 +1460,7 @@ config SECCOMP
6889
6890 config CC_STACKPROTECTOR
6891 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6892+ depends on X86_64 || !PAX_MEMORY_UDEREF
6893 ---help---
6894 This option turns on the -fstack-protector GCC feature. This
6895 feature puts, at the beginning of functions, a canary value on
6896@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6897 config PHYSICAL_START
6898 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6899 default "0x1000000"
6900+ range 0x400000 0x40000000
6901 ---help---
6902 This gives the physical address where the kernel is loaded.
6903
6904@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6905 hex
6906 prompt "Alignment value to which kernel should be aligned" if X86_32
6907 default "0x1000000"
6908+ range 0x400000 0x1000000 if PAX_KERNEXEC
6909 range 0x2000 0x1000000
6910 ---help---
6911 This value puts the alignment restrictions on physical address
6912@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6913 Say N if you want to disable CPU hotplug.
6914
6915 config COMPAT_VDSO
6916- def_bool y
6917+ def_bool n
6918 prompt "Compat VDSO support"
6919 depends on X86_32 || IA32_EMULATION
6920+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6921 ---help---
6922 Map the 32-bit VDSO to the predictable old-style address too.
6923 ---help---
6924diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6925index 0e566103..1a6b57e 100644
6926--- a/arch/x86/Kconfig.cpu
6927+++ b/arch/x86/Kconfig.cpu
6928@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6929
6930 config X86_F00F_BUG
6931 def_bool y
6932- depends on M586MMX || M586TSC || M586 || M486 || M386
6933+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6934
6935 config X86_WP_WORKS_OK
6936 def_bool y
6937@@ -360,7 +360,7 @@ config X86_POPAD_OK
6938
6939 config X86_ALIGNMENT_16
6940 def_bool y
6941- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6942+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6943
6944 config X86_INTEL_USERCOPY
6945 def_bool y
6946@@ -406,7 +406,7 @@ config X86_CMPXCHG64
6947 # generates cmov.
6948 config X86_CMOV
6949 def_bool y
6950- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6951+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6952
6953 config X86_MINIMUM_CPU_FAMILY
6954 int
6955diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6956index d105f29..c928727 100644
6957--- a/arch/x86/Kconfig.debug
6958+++ b/arch/x86/Kconfig.debug
6959@@ -99,7 +99,7 @@ config X86_PTDUMP
6960 config DEBUG_RODATA
6961 bool "Write protect kernel read-only data structures"
6962 default y
6963- depends on DEBUG_KERNEL
6964+ depends on DEBUG_KERNEL && BROKEN
6965 ---help---
6966 Mark the kernel read-only data as write-protected in the pagetables,
6967 in order to catch accidental (and incorrect) writes to such const
6968diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6969index d2d24c9..0f21f8d 100644
6970--- a/arch/x86/Makefile
6971+++ b/arch/x86/Makefile
6972@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6973 else
6974 BITS := 64
6975 UTS_MACHINE := x86_64
6976+ biarch := $(call cc-option,-m64)
6977 CHECKFLAGS += -D__x86_64__ -m64
6978
6979 KBUILD_AFLAGS += -m64
6980@@ -189,3 +190,12 @@ define archhelp
6981 echo ' FDARGS="..." arguments for the booted kernel'
6982 echo ' FDINITRD=file initrd for the booted kernel'
6983 endef
6984+
6985+define OLD_LD
6986+
6987+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6988+*** Please upgrade your binutils to 2.18 or newer
6989+endef
6990+
6991+archprepare:
6992+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6993diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6994index ec749c2..bbb5319 100644
6995--- a/arch/x86/boot/Makefile
6996+++ b/arch/x86/boot/Makefile
6997@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
6998 $(call cc-option, -fno-stack-protector) \
6999 $(call cc-option, -mpreferred-stack-boundary=2)
7000 KBUILD_CFLAGS += $(call cc-option, -m32)
7001+ifdef CONSTIFY_PLUGIN
7002+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7003+endif
7004 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7005 GCOV_PROFILE := n
7006
7007diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7008index 878e4b9..20537ab 100644
7009--- a/arch/x86/boot/bitops.h
7010+++ b/arch/x86/boot/bitops.h
7011@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7012 u8 v;
7013 const u32 *p = (const u32 *)addr;
7014
7015- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7016+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7017 return v;
7018 }
7019
7020@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7021
7022 static inline void set_bit(int nr, void *addr)
7023 {
7024- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7025+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7026 }
7027
7028 #endif /* BOOT_BITOPS_H */
7029diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7030index 98239d2..f40214c 100644
7031--- a/arch/x86/boot/boot.h
7032+++ b/arch/x86/boot/boot.h
7033@@ -82,7 +82,7 @@ static inline void io_delay(void)
7034 static inline u16 ds(void)
7035 {
7036 u16 seg;
7037- asm("movw %%ds,%0" : "=rm" (seg));
7038+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7039 return seg;
7040 }
7041
7042@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7043 static inline int memcmp(const void *s1, const void *s2, size_t len)
7044 {
7045 u8 diff;
7046- asm("repe; cmpsb; setnz %0"
7047+ asm volatile("repe; cmpsb; setnz %0"
7048 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7049 return diff;
7050 }
7051diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7052index f8ed065..5bf5ff3 100644
7053--- a/arch/x86/boot/compressed/Makefile
7054+++ b/arch/x86/boot/compressed/Makefile
7055@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7056 KBUILD_CFLAGS += $(cflags-y)
7057 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7058 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7059+ifdef CONSTIFY_PLUGIN
7060+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7061+endif
7062
7063 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7064 GCOV_PROFILE := n
7065diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7066index f543b70..b60fba8 100644
7067--- a/arch/x86/boot/compressed/head_32.S
7068+++ b/arch/x86/boot/compressed/head_32.S
7069@@ -76,7 +76,7 @@ ENTRY(startup_32)
7070 notl %eax
7071 andl %eax, %ebx
7072 #else
7073- movl $LOAD_PHYSICAL_ADDR, %ebx
7074+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7075 #endif
7076
7077 /* Target address to relocate to for decompression */
7078@@ -149,7 +149,7 @@ relocated:
7079 * and where it was actually loaded.
7080 */
7081 movl %ebp, %ebx
7082- subl $LOAD_PHYSICAL_ADDR, %ebx
7083+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7084 jz 2f /* Nothing to be done if loaded at compiled addr. */
7085 /*
7086 * Process relocations.
7087@@ -157,8 +157,7 @@ relocated:
7088
7089 1: subl $4, %edi
7090 movl (%edi), %ecx
7091- testl %ecx, %ecx
7092- jz 2f
7093+ jecxz 2f
7094 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7095 jmp 1b
7096 2:
7097diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7098index 077e1b6..2c6b13b 100644
7099--- a/arch/x86/boot/compressed/head_64.S
7100+++ b/arch/x86/boot/compressed/head_64.S
7101@@ -91,7 +91,7 @@ ENTRY(startup_32)
7102 notl %eax
7103 andl %eax, %ebx
7104 #else
7105- movl $LOAD_PHYSICAL_ADDR, %ebx
7106+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7107 #endif
7108
7109 /* Target address to relocate to for decompression */
7110@@ -183,7 +183,7 @@ no_longmode:
7111 hlt
7112 jmp 1b
7113
7114-#include "../../kernel/verify_cpu_64.S"
7115+#include "../../kernel/verify_cpu.S"
7116
7117 /*
7118 * Be careful here startup_64 needs to be at a predictable
7119@@ -234,7 +234,7 @@ ENTRY(startup_64)
7120 notq %rax
7121 andq %rax, %rbp
7122 #else
7123- movq $LOAD_PHYSICAL_ADDR, %rbp
7124+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7125 #endif
7126
7127 /* Target address to relocate to for decompression */
7128diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7129index 842b2a3..f00178b 100644
7130--- a/arch/x86/boot/compressed/misc.c
7131+++ b/arch/x86/boot/compressed/misc.c
7132@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7133 case PT_LOAD:
7134 #ifdef CONFIG_RELOCATABLE
7135 dest = output;
7136- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7137+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7138 #else
7139 dest = (void *)(phdr->p_paddr);
7140 #endif
7141@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7142 error("Destination address too large");
7143 #endif
7144 #ifndef CONFIG_RELOCATABLE
7145- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7146+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7147 error("Wrong destination address");
7148 #endif
7149
7150diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7151index bcbd36c..b1754af 100644
7152--- a/arch/x86/boot/compressed/mkpiggy.c
7153+++ b/arch/x86/boot/compressed/mkpiggy.c
7154@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7155
7156 offs = (olen > ilen) ? olen - ilen : 0;
7157 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7158- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7159+ offs += 64*1024; /* Add 64K bytes slack */
7160 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7161
7162 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7163diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7164index bbeb0c3..f5167ab 100644
7165--- a/arch/x86/boot/compressed/relocs.c
7166+++ b/arch/x86/boot/compressed/relocs.c
7167@@ -10,8 +10,11 @@
7168 #define USE_BSD
7169 #include <endian.h>
7170
7171+#include "../../../../include/linux/autoconf.h"
7172+
7173 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7174 static Elf32_Ehdr ehdr;
7175+static Elf32_Phdr *phdr;
7176 static unsigned long reloc_count, reloc_idx;
7177 static unsigned long *relocs;
7178
7179@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7180
7181 static int is_safe_abs_reloc(const char* sym_name)
7182 {
7183- int i;
7184+ unsigned int i;
7185
7186 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7187 if (!strcmp(sym_name, safe_abs_relocs[i]))
7188@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7189 }
7190 }
7191
7192+static void read_phdrs(FILE *fp)
7193+{
7194+ unsigned int i;
7195+
7196+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7197+ if (!phdr) {
7198+ die("Unable to allocate %d program headers\n",
7199+ ehdr.e_phnum);
7200+ }
7201+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7202+ die("Seek to %d failed: %s\n",
7203+ ehdr.e_phoff, strerror(errno));
7204+ }
7205+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7206+ die("Cannot read ELF program headers: %s\n",
7207+ strerror(errno));
7208+ }
7209+ for(i = 0; i < ehdr.e_phnum; i++) {
7210+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7211+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7212+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7213+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7214+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7215+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7216+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7217+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7218+ }
7219+
7220+}
7221+
7222 static void read_shdrs(FILE *fp)
7223 {
7224- int i;
7225+ unsigned int i;
7226 Elf32_Shdr shdr;
7227
7228 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7229@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7230
7231 static void read_strtabs(FILE *fp)
7232 {
7233- int i;
7234+ unsigned int i;
7235 for (i = 0; i < ehdr.e_shnum; i++) {
7236 struct section *sec = &secs[i];
7237 if (sec->shdr.sh_type != SHT_STRTAB) {
7238@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7239
7240 static void read_symtabs(FILE *fp)
7241 {
7242- int i,j;
7243+ unsigned int i,j;
7244 for (i = 0; i < ehdr.e_shnum; i++) {
7245 struct section *sec = &secs[i];
7246 if (sec->shdr.sh_type != SHT_SYMTAB) {
7247@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7248
7249 static void read_relocs(FILE *fp)
7250 {
7251- int i,j;
7252+ unsigned int i,j;
7253+ uint32_t base;
7254+
7255 for (i = 0; i < ehdr.e_shnum; i++) {
7256 struct section *sec = &secs[i];
7257 if (sec->shdr.sh_type != SHT_REL) {
7258@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7259 die("Cannot read symbol table: %s\n",
7260 strerror(errno));
7261 }
7262+ base = 0;
7263+ for (j = 0; j < ehdr.e_phnum; j++) {
7264+ if (phdr[j].p_type != PT_LOAD )
7265+ continue;
7266+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7267+ continue;
7268+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7269+ break;
7270+ }
7271 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7272 Elf32_Rel *rel = &sec->reltab[j];
7273- rel->r_offset = elf32_to_cpu(rel->r_offset);
7274+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7275 rel->r_info = elf32_to_cpu(rel->r_info);
7276 }
7277 }
7278@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7279
7280 static void print_absolute_symbols(void)
7281 {
7282- int i;
7283+ unsigned int i;
7284 printf("Absolute symbols\n");
7285 printf(" Num: Value Size Type Bind Visibility Name\n");
7286 for (i = 0; i < ehdr.e_shnum; i++) {
7287 struct section *sec = &secs[i];
7288 char *sym_strtab;
7289 Elf32_Sym *sh_symtab;
7290- int j;
7291+ unsigned int j;
7292
7293 if (sec->shdr.sh_type != SHT_SYMTAB) {
7294 continue;
7295@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7296
7297 static void print_absolute_relocs(void)
7298 {
7299- int i, printed = 0;
7300+ unsigned int i, printed = 0;
7301
7302 for (i = 0; i < ehdr.e_shnum; i++) {
7303 struct section *sec = &secs[i];
7304 struct section *sec_applies, *sec_symtab;
7305 char *sym_strtab;
7306 Elf32_Sym *sh_symtab;
7307- int j;
7308+ unsigned int j;
7309 if (sec->shdr.sh_type != SHT_REL) {
7310 continue;
7311 }
7312@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7313
7314 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7315 {
7316- int i;
7317+ unsigned int i;
7318 /* Walk through the relocations */
7319 for (i = 0; i < ehdr.e_shnum; i++) {
7320 char *sym_strtab;
7321 Elf32_Sym *sh_symtab;
7322 struct section *sec_applies, *sec_symtab;
7323- int j;
7324+ unsigned int j;
7325 struct section *sec = &secs[i];
7326
7327 if (sec->shdr.sh_type != SHT_REL) {
7328@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7329 if (sym->st_shndx == SHN_ABS) {
7330 continue;
7331 }
7332+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7333+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7334+ continue;
7335+
7336+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7337+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7338+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7339+ continue;
7340+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7341+ continue;
7342+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7343+ continue;
7344+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7345+ continue;
7346+#endif
7347 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7348 /*
7349 * NONE can be ignored and and PC relative
7350@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7351
7352 static void emit_relocs(int as_text)
7353 {
7354- int i;
7355+ unsigned int i;
7356 /* Count how many relocations I have and allocate space for them. */
7357 reloc_count = 0;
7358 walk_relocs(count_reloc);
7359@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7360 fname, strerror(errno));
7361 }
7362 read_ehdr(fp);
7363+ read_phdrs(fp);
7364 read_shdrs(fp);
7365 read_strtabs(fp);
7366 read_symtabs(fp);
7367diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7368index 4d3ff03..e4972ff 100644
7369--- a/arch/x86/boot/cpucheck.c
7370+++ b/arch/x86/boot/cpucheck.c
7371@@ -74,7 +74,7 @@ static int has_fpu(void)
7372 u16 fcw = -1, fsw = -1;
7373 u32 cr0;
7374
7375- asm("movl %%cr0,%0" : "=r" (cr0));
7376+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7377 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7378 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7379 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7380@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7381 {
7382 u32 f0, f1;
7383
7384- asm("pushfl ; "
7385+ asm volatile("pushfl ; "
7386 "pushfl ; "
7387 "popl %0 ; "
7388 "movl %0,%1 ; "
7389@@ -115,7 +115,7 @@ static void get_flags(void)
7390 set_bit(X86_FEATURE_FPU, cpu.flags);
7391
7392 if (has_eflag(X86_EFLAGS_ID)) {
7393- asm("cpuid"
7394+ asm volatile("cpuid"
7395 : "=a" (max_intel_level),
7396 "=b" (cpu_vendor[0]),
7397 "=d" (cpu_vendor[1]),
7398@@ -124,7 +124,7 @@ static void get_flags(void)
7399
7400 if (max_intel_level >= 0x00000001 &&
7401 max_intel_level <= 0x0000ffff) {
7402- asm("cpuid"
7403+ asm volatile("cpuid"
7404 : "=a" (tfms),
7405 "=c" (cpu.flags[4]),
7406 "=d" (cpu.flags[0])
7407@@ -136,7 +136,7 @@ static void get_flags(void)
7408 cpu.model += ((tfms >> 16) & 0xf) << 4;
7409 }
7410
7411- asm("cpuid"
7412+ asm volatile("cpuid"
7413 : "=a" (max_amd_level)
7414 : "a" (0x80000000)
7415 : "ebx", "ecx", "edx");
7416@@ -144,7 +144,7 @@ static void get_flags(void)
7417 if (max_amd_level >= 0x80000001 &&
7418 max_amd_level <= 0x8000ffff) {
7419 u32 eax = 0x80000001;
7420- asm("cpuid"
7421+ asm volatile("cpuid"
7422 : "+a" (eax),
7423 "=c" (cpu.flags[6]),
7424 "=d" (cpu.flags[1])
7425@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7426 u32 ecx = MSR_K7_HWCR;
7427 u32 eax, edx;
7428
7429- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7430+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7431 eax &= ~(1 << 15);
7432- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7433+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7434
7435 get_flags(); /* Make sure it really did something */
7436 err = check_flags();
7437@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7438 u32 ecx = MSR_VIA_FCR;
7439 u32 eax, edx;
7440
7441- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7442+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7443 eax |= (1<<1)|(1<<7);
7444- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7445+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7446
7447 set_bit(X86_FEATURE_CX8, cpu.flags);
7448 err = check_flags();
7449@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7450 u32 eax, edx;
7451 u32 level = 1;
7452
7453- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7454- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7455- asm("cpuid"
7456+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7457+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7458+ asm volatile("cpuid"
7459 : "+a" (level), "=d" (cpu.flags[0])
7460 : : "ecx", "ebx");
7461- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7462+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7463
7464 err = check_flags();
7465 }
7466diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7467index b31cc54..8d69237 100644
7468--- a/arch/x86/boot/header.S
7469+++ b/arch/x86/boot/header.S
7470@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7471 # single linked list of
7472 # struct setup_data
7473
7474-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7475+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7476
7477 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7478 #define VO_INIT_SIZE (VO__end - VO__text)
7479diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7480index cae3feb..ff8ff2a 100644
7481--- a/arch/x86/boot/memory.c
7482+++ b/arch/x86/boot/memory.c
7483@@ -19,7 +19,7 @@
7484
7485 static int detect_memory_e820(void)
7486 {
7487- int count = 0;
7488+ unsigned int count = 0;
7489 struct biosregs ireg, oreg;
7490 struct e820entry *desc = boot_params.e820_map;
7491 static struct e820entry buf; /* static so it is zeroed */
7492diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7493index 11e8c6e..fdbb1ed 100644
7494--- a/arch/x86/boot/video-vesa.c
7495+++ b/arch/x86/boot/video-vesa.c
7496@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7497
7498 boot_params.screen_info.vesapm_seg = oreg.es;
7499 boot_params.screen_info.vesapm_off = oreg.di;
7500+ boot_params.screen_info.vesapm_size = oreg.cx;
7501 }
7502
7503 /*
7504diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7505index d42da38..787cdf3 100644
7506--- a/arch/x86/boot/video.c
7507+++ b/arch/x86/boot/video.c
7508@@ -90,7 +90,7 @@ static void store_mode_params(void)
7509 static unsigned int get_entry(void)
7510 {
7511 char entry_buf[4];
7512- int i, len = 0;
7513+ unsigned int i, len = 0;
7514 int key;
7515 unsigned int v;
7516
7517diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7518index 5b577d5..3c1fed4 100644
7519--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7520+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7521@@ -8,6 +8,8 @@
7522 * including this sentence is retained in full.
7523 */
7524
7525+#include <asm/alternative-asm.h>
7526+
7527 .extern crypto_ft_tab
7528 .extern crypto_it_tab
7529 .extern crypto_fl_tab
7530@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7531 je B192; \
7532 leaq 32(r9),r9;
7533
7534+#define ret pax_force_retaddr 0, 1; ret
7535+
7536 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7537 movq r1,r2; \
7538 movq r3,r4; \
7539diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7540index eb0566e..e3ebad8 100644
7541--- a/arch/x86/crypto/aesni-intel_asm.S
7542+++ b/arch/x86/crypto/aesni-intel_asm.S
7543@@ -16,6 +16,7 @@
7544 */
7545
7546 #include <linux/linkage.h>
7547+#include <asm/alternative-asm.h>
7548
7549 .text
7550
7551@@ -52,6 +53,7 @@ _key_expansion_256a:
7552 pxor %xmm1, %xmm0
7553 movaps %xmm0, (%rcx)
7554 add $0x10, %rcx
7555+ pax_force_retaddr_bts
7556 ret
7557
7558 _key_expansion_192a:
7559@@ -75,6 +77,7 @@ _key_expansion_192a:
7560 shufps $0b01001110, %xmm2, %xmm1
7561 movaps %xmm1, 16(%rcx)
7562 add $0x20, %rcx
7563+ pax_force_retaddr_bts
7564 ret
7565
7566 _key_expansion_192b:
7567@@ -93,6 +96,7 @@ _key_expansion_192b:
7568
7569 movaps %xmm0, (%rcx)
7570 add $0x10, %rcx
7571+ pax_force_retaddr_bts
7572 ret
7573
7574 _key_expansion_256b:
7575@@ -104,6 +108,7 @@ _key_expansion_256b:
7576 pxor %xmm1, %xmm2
7577 movaps %xmm2, (%rcx)
7578 add $0x10, %rcx
7579+ pax_force_retaddr_bts
7580 ret
7581
7582 /*
7583@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7584 cmp %rcx, %rdi
7585 jb .Ldec_key_loop
7586 xor %rax, %rax
7587+ pax_force_retaddr 0, 1
7588 ret
7589+ENDPROC(aesni_set_key)
7590
7591 /*
7592 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7593@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7594 movups (INP), STATE # input
7595 call _aesni_enc1
7596 movups STATE, (OUTP) # output
7597+ pax_force_retaddr 0, 1
7598 ret
7599+ENDPROC(aesni_enc)
7600
7601 /*
7602 * _aesni_enc1: internal ABI
7603@@ -319,6 +328,7 @@ _aesni_enc1:
7604 movaps 0x70(TKEYP), KEY
7605 # aesenclast KEY, STATE # last round
7606 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7607+ pax_force_retaddr_bts
7608 ret
7609
7610 /*
7611@@ -482,6 +492,7 @@ _aesni_enc4:
7612 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7613 # aesenclast KEY, STATE4
7614 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7615+ pax_force_retaddr_bts
7616 ret
7617
7618 /*
7619@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7620 movups (INP), STATE # input
7621 call _aesni_dec1
7622 movups STATE, (OUTP) #output
7623+ pax_force_retaddr 0, 1
7624 ret
7625+ENDPROC(aesni_dec)
7626
7627 /*
7628 * _aesni_dec1: internal ABI
7629@@ -563,6 +576,7 @@ _aesni_dec1:
7630 movaps 0x70(TKEYP), KEY
7631 # aesdeclast KEY, STATE # last round
7632 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7633+ pax_force_retaddr_bts
7634 ret
7635
7636 /*
7637@@ -726,6 +740,7 @@ _aesni_dec4:
7638 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7639 # aesdeclast KEY, STATE4
7640 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7641+ pax_force_retaddr_bts
7642 ret
7643
7644 /*
7645@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7646 cmp $16, LEN
7647 jge .Lecb_enc_loop1
7648 .Lecb_enc_ret:
7649+ pax_force_retaddr 0, 1
7650 ret
7651+ENDPROC(aesni_ecb_enc)
7652
7653 /*
7654 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7655@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7656 cmp $16, LEN
7657 jge .Lecb_dec_loop1
7658 .Lecb_dec_ret:
7659+ pax_force_retaddr 0, 1
7660 ret
7661+ENDPROC(aesni_ecb_dec)
7662
7663 /*
7664 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7665@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7666 jge .Lcbc_enc_loop
7667 movups STATE, (IVP)
7668 .Lcbc_enc_ret:
7669+ pax_force_retaddr 0, 1
7670 ret
7671+ENDPROC(aesni_cbc_enc)
7672
7673 /*
7674 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7675@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7676 .Lcbc_dec_ret:
7677 movups IV, (IVP)
7678 .Lcbc_dec_just_ret:
7679+ pax_force_retaddr 0, 1
7680 ret
7681+ENDPROC(aesni_cbc_dec)
7682diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7683index 6214a9b..1f4fc9a 100644
7684--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7685+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7686@@ -1,3 +1,5 @@
7687+#include <asm/alternative-asm.h>
7688+
7689 # enter ECRYPT_encrypt_bytes
7690 .text
7691 .p2align 5
7692@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7693 add %r11,%rsp
7694 mov %rdi,%rax
7695 mov %rsi,%rdx
7696+ pax_force_retaddr 0, 1
7697 ret
7698 # bytesatleast65:
7699 ._bytesatleast65:
7700@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7701 add %r11,%rsp
7702 mov %rdi,%rax
7703 mov %rsi,%rdx
7704+ pax_force_retaddr
7705 ret
7706 # enter ECRYPT_ivsetup
7707 .text
7708@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7709 add %r11,%rsp
7710 mov %rdi,%rax
7711 mov %rsi,%rdx
7712+ pax_force_retaddr
7713 ret
7714diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7715index 35974a5..5662ae2 100644
7716--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7717+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7718@@ -21,6 +21,7 @@
7719 .text
7720
7721 #include <asm/asm-offsets.h>
7722+#include <asm/alternative-asm.h>
7723
7724 #define a_offset 0
7725 #define b_offset 4
7726@@ -269,6 +270,7 @@ twofish_enc_blk:
7727
7728 popq R1
7729 movq $1,%rax
7730+ pax_force_retaddr 0, 1
7731 ret
7732
7733 twofish_dec_blk:
7734@@ -321,4 +323,5 @@ twofish_dec_blk:
7735
7736 popq R1
7737 movq $1,%rax
7738+ pax_force_retaddr 0, 1
7739 ret
7740diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7741index 14531ab..a89a0c0 100644
7742--- a/arch/x86/ia32/ia32_aout.c
7743+++ b/arch/x86/ia32/ia32_aout.c
7744@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7745 unsigned long dump_start, dump_size;
7746 struct user32 dump;
7747
7748+ memset(&dump, 0, sizeof(dump));
7749+
7750 fs = get_fs();
7751 set_fs(KERNEL_DS);
7752 has_dumped = 1;
7753@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7754 dump_size = dump.u_ssize << PAGE_SHIFT;
7755 DUMP_WRITE(dump_start, dump_size);
7756 }
7757- /*
7758- * Finally dump the task struct. Not be used by gdb, but
7759- * could be useful
7760- */
7761- set_fs(KERNEL_DS);
7762- DUMP_WRITE(current, sizeof(*current));
7763 end_coredump:
7764 set_fs(fs);
7765 return has_dumped;
7766diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7767index 588a7aa..a3468b0 100644
7768--- a/arch/x86/ia32/ia32_signal.c
7769+++ b/arch/x86/ia32/ia32_signal.c
7770@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7771 }
7772 seg = get_fs();
7773 set_fs(KERNEL_DS);
7774- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7775+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7776 set_fs(seg);
7777 if (ret >= 0 && uoss_ptr) {
7778 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7779@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7780 */
7781 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7782 size_t frame_size,
7783- void **fpstate)
7784+ void __user **fpstate)
7785 {
7786 unsigned long sp;
7787
7788@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7789
7790 if (used_math()) {
7791 sp = sp - sig_xstate_ia32_size;
7792- *fpstate = (struct _fpstate_ia32 *) sp;
7793+ *fpstate = (struct _fpstate_ia32 __user *) sp;
7794 if (save_i387_xstate_ia32(*fpstate) < 0)
7795 return (void __user *) -1L;
7796 }
7797@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7798 sp -= frame_size;
7799 /* Align the stack pointer according to the i386 ABI,
7800 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7801- sp = ((sp + 4) & -16ul) - 4;
7802+ sp = ((sp - 12) & -16ul) - 4;
7803 return (void __user *) sp;
7804 }
7805
7806@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7807 * These are actually not used anymore, but left because some
7808 * gdb versions depend on them as a marker.
7809 */
7810- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7811+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7812 } put_user_catch(err);
7813
7814 if (err)
7815@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7816 0xb8,
7817 __NR_ia32_rt_sigreturn,
7818 0x80cd,
7819- 0,
7820+ 0
7821 };
7822
7823 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7824@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7825
7826 if (ka->sa.sa_flags & SA_RESTORER)
7827 restorer = ka->sa.sa_restorer;
7828+ else if (current->mm->context.vdso)
7829+ /* Return stub is in 32bit vsyscall page */
7830+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7831 else
7832- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7833- rt_sigreturn);
7834+ restorer = &frame->retcode;
7835 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7836
7837 /*
7838 * Not actually used anymore, but left because some gdb
7839 * versions need it.
7840 */
7841- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7842+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7843 } put_user_catch(err);
7844
7845 if (err)
7846diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7847index 4edd8eb..07ac7fd 100644
7848--- a/arch/x86/ia32/ia32entry.S
7849+++ b/arch/x86/ia32/ia32entry.S
7850@@ -13,7 +13,9 @@
7851 #include <asm/thread_info.h>
7852 #include <asm/segment.h>
7853 #include <asm/irqflags.h>
7854+#include <asm/pgtable.h>
7855 #include <linux/linkage.h>
7856+#include <asm/alternative-asm.h>
7857
7858 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7859 #include <linux/elf-em.h>
7860@@ -93,6 +95,30 @@ ENTRY(native_irq_enable_sysexit)
7861 ENDPROC(native_irq_enable_sysexit)
7862 #endif
7863
7864+ .macro pax_enter_kernel_user
7865+ pax_set_fptr_mask
7866+#ifdef CONFIG_PAX_MEMORY_UDEREF
7867+ call pax_enter_kernel_user
7868+#endif
7869+ .endm
7870+
7871+ .macro pax_exit_kernel_user
7872+#ifdef CONFIG_PAX_MEMORY_UDEREF
7873+ call pax_exit_kernel_user
7874+#endif
7875+#ifdef CONFIG_PAX_RANDKSTACK
7876+ pushq %rax
7877+ call pax_randomize_kstack
7878+ popq %rax
7879+#endif
7880+ .endm
7881+
7882+.macro pax_erase_kstack
7883+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7884+ call pax_erase_kstack
7885+#endif
7886+.endm
7887+
7888 /*
7889 * 32bit SYSENTER instruction entry.
7890 *
7891@@ -119,12 +145,6 @@ ENTRY(ia32_sysenter_target)
7892 CFI_REGISTER rsp,rbp
7893 SWAPGS_UNSAFE_STACK
7894 movq PER_CPU_VAR(kernel_stack), %rsp
7895- addq $(KERNEL_STACK_OFFSET),%rsp
7896- /*
7897- * No need to follow this irqs on/off section: the syscall
7898- * disabled irqs, here we enable it straight after entry:
7899- */
7900- ENABLE_INTERRUPTS(CLBR_NONE)
7901 movl %ebp,%ebp /* zero extension */
7902 pushq $__USER32_DS
7903 CFI_ADJUST_CFA_OFFSET 8
7904@@ -135,28 +155,41 @@ ENTRY(ia32_sysenter_target)
7905 pushfq
7906 CFI_ADJUST_CFA_OFFSET 8
7907 /*CFI_REL_OFFSET rflags,0*/
7908- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7909- CFI_REGISTER rip,r10
7910+ GET_THREAD_INFO(%r11)
7911+ movl TI_sysenter_return(%r11), %r11d
7912+ CFI_REGISTER rip,r11
7913 pushq $__USER32_CS
7914 CFI_ADJUST_CFA_OFFSET 8
7915 /*CFI_REL_OFFSET cs,0*/
7916 movl %eax, %eax
7917- pushq %r10
7918+ pushq %r11
7919 CFI_ADJUST_CFA_OFFSET 8
7920 CFI_REL_OFFSET rip,0
7921 pushq %rax
7922 CFI_ADJUST_CFA_OFFSET 8
7923 cld
7924 SAVE_ARGS 0,0,1
7925+ pax_enter_kernel_user
7926+ /*
7927+ * No need to follow this irqs on/off section: the syscall
7928+ * disabled irqs, here we enable it straight after entry:
7929+ */
7930+ ENABLE_INTERRUPTS(CLBR_NONE)
7931 /* no need to do an access_ok check here because rbp has been
7932 32bit zero extended */
7933+
7934+#ifdef CONFIG_PAX_MEMORY_UDEREF
7935+ mov $PAX_USER_SHADOW_BASE,%r11
7936+ add %r11,%rbp
7937+#endif
7938+
7939 1: movl (%rbp),%ebp
7940 .section __ex_table,"a"
7941 .quad 1b,ia32_badarg
7942 .previous
7943- GET_THREAD_INFO(%r10)
7944- orl $TS_COMPAT,TI_status(%r10)
7945- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7946+ GET_THREAD_INFO(%r11)
7947+ orl $TS_COMPAT,TI_status(%r11)
7948+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7949 CFI_REMEMBER_STATE
7950 jnz sysenter_tracesys
7951 cmpq $(IA32_NR_syscalls-1),%rax
7952@@ -166,13 +199,15 @@ sysenter_do_call:
7953 sysenter_dispatch:
7954 call *ia32_sys_call_table(,%rax,8)
7955 movq %rax,RAX-ARGOFFSET(%rsp)
7956- GET_THREAD_INFO(%r10)
7957+ GET_THREAD_INFO(%r11)
7958 DISABLE_INTERRUPTS(CLBR_NONE)
7959 TRACE_IRQS_OFF
7960- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7961+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7962 jnz sysexit_audit
7963 sysexit_from_sys_call:
7964- andl $~TS_COMPAT,TI_status(%r10)
7965+ pax_exit_kernel_user
7966+ pax_erase_kstack
7967+ andl $~TS_COMPAT,TI_status(%r11)
7968 /* clear IF, that popfq doesn't enable interrupts early */
7969 andl $~0x200,EFLAGS-R11(%rsp)
7970 movl RIP-R11(%rsp),%edx /* User %eip */
7971@@ -200,6 +235,9 @@ sysexit_from_sys_call:
7972 movl %eax,%esi /* 2nd arg: syscall number */
7973 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7974 call audit_syscall_entry
7975+
7976+ pax_erase_kstack
7977+
7978 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7979 cmpq $(IA32_NR_syscalls-1),%rax
7980 ja ia32_badsys
7981@@ -211,7 +249,7 @@ sysexit_from_sys_call:
7982 .endm
7983
7984 .macro auditsys_exit exit
7985- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7986+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7987 jnz ia32_ret_from_sys_call
7988 TRACE_IRQS_ON
7989 sti
7990@@ -221,12 +259,12 @@ sysexit_from_sys_call:
7991 movzbl %al,%edi /* zero-extend that into %edi */
7992 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7993 call audit_syscall_exit
7994- GET_THREAD_INFO(%r10)
7995+ GET_THREAD_INFO(%r11)
7996 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7997 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7998 cli
7999 TRACE_IRQS_OFF
8000- testl %edi,TI_flags(%r10)
8001+ testl %edi,TI_flags(%r11)
8002 jz \exit
8003 CLEAR_RREGS -ARGOFFSET
8004 jmp int_with_check
8005@@ -244,7 +282,7 @@ sysexit_audit:
8006
8007 sysenter_tracesys:
8008 #ifdef CONFIG_AUDITSYSCALL
8009- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8010+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8011 jz sysenter_auditsys
8012 #endif
8013 SAVE_REST
8014@@ -252,6 +290,9 @@ sysenter_tracesys:
8015 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8016 movq %rsp,%rdi /* &pt_regs -> arg1 */
8017 call syscall_trace_enter
8018+
8019+ pax_erase_kstack
8020+
8021 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8022 RESTORE_REST
8023 cmpq $(IA32_NR_syscalls-1),%rax
8024@@ -283,19 +324,20 @@ ENDPROC(ia32_sysenter_target)
8025 ENTRY(ia32_cstar_target)
8026 CFI_STARTPROC32 simple
8027 CFI_SIGNAL_FRAME
8028- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8029+ CFI_DEF_CFA rsp,0
8030 CFI_REGISTER rip,rcx
8031 /*CFI_REGISTER rflags,r11*/
8032 SWAPGS_UNSAFE_STACK
8033 movl %esp,%r8d
8034 CFI_REGISTER rsp,r8
8035 movq PER_CPU_VAR(kernel_stack),%rsp
8036+ SAVE_ARGS 8*6,1,1
8037+ pax_enter_kernel_user
8038 /*
8039 * No need to follow this irqs on/off section: the syscall
8040 * disabled irqs and here we enable it straight after entry:
8041 */
8042 ENABLE_INTERRUPTS(CLBR_NONE)
8043- SAVE_ARGS 8,1,1
8044 movl %eax,%eax /* zero extension */
8045 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8046 movq %rcx,RIP-ARGOFFSET(%rsp)
8047@@ -311,13 +353,19 @@ ENTRY(ia32_cstar_target)
8048 /* no need to do an access_ok check here because r8 has been
8049 32bit zero extended */
8050 /* hardware stack frame is complete now */
8051+
8052+#ifdef CONFIG_PAX_MEMORY_UDEREF
8053+ mov $PAX_USER_SHADOW_BASE,%r11
8054+ add %r11,%r8
8055+#endif
8056+
8057 1: movl (%r8),%r9d
8058 .section __ex_table,"a"
8059 .quad 1b,ia32_badarg
8060 .previous
8061- GET_THREAD_INFO(%r10)
8062- orl $TS_COMPAT,TI_status(%r10)
8063- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8064+ GET_THREAD_INFO(%r11)
8065+ orl $TS_COMPAT,TI_status(%r11)
8066+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8067 CFI_REMEMBER_STATE
8068 jnz cstar_tracesys
8069 cmpq $IA32_NR_syscalls-1,%rax
8070@@ -327,13 +375,15 @@ cstar_do_call:
8071 cstar_dispatch:
8072 call *ia32_sys_call_table(,%rax,8)
8073 movq %rax,RAX-ARGOFFSET(%rsp)
8074- GET_THREAD_INFO(%r10)
8075+ GET_THREAD_INFO(%r11)
8076 DISABLE_INTERRUPTS(CLBR_NONE)
8077 TRACE_IRQS_OFF
8078- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8079+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8080 jnz sysretl_audit
8081 sysretl_from_sys_call:
8082- andl $~TS_COMPAT,TI_status(%r10)
8083+ pax_exit_kernel_user
8084+ pax_erase_kstack
8085+ andl $~TS_COMPAT,TI_status(%r11)
8086 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8087 movl RIP-ARGOFFSET(%rsp),%ecx
8088 CFI_REGISTER rip,rcx
8089@@ -361,7 +411,7 @@ sysretl_audit:
8090
8091 cstar_tracesys:
8092 #ifdef CONFIG_AUDITSYSCALL
8093- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8094+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8095 jz cstar_auditsys
8096 #endif
8097 xchgl %r9d,%ebp
8098@@ -370,6 +420,9 @@ cstar_tracesys:
8099 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8100 movq %rsp,%rdi /* &pt_regs -> arg1 */
8101 call syscall_trace_enter
8102+
8103+ pax_erase_kstack
8104+
8105 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8106 RESTORE_REST
8107 xchgl %ebp,%r9d
8108@@ -415,11 +468,6 @@ ENTRY(ia32_syscall)
8109 CFI_REL_OFFSET rip,RIP-RIP
8110 PARAVIRT_ADJUST_EXCEPTION_FRAME
8111 SWAPGS
8112- /*
8113- * No need to follow this irqs on/off section: the syscall
8114- * disabled irqs and here we enable it straight after entry:
8115- */
8116- ENABLE_INTERRUPTS(CLBR_NONE)
8117 movl %eax,%eax
8118 pushq %rax
8119 CFI_ADJUST_CFA_OFFSET 8
8120@@ -427,9 +475,15 @@ ENTRY(ia32_syscall)
8121 /* note the registers are not zero extended to the sf.
8122 this could be a problem. */
8123 SAVE_ARGS 0,0,1
8124- GET_THREAD_INFO(%r10)
8125- orl $TS_COMPAT,TI_status(%r10)
8126- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8127+ pax_enter_kernel_user
8128+ /*
8129+ * No need to follow this irqs on/off section: the syscall
8130+ * disabled irqs and here we enable it straight after entry:
8131+ */
8132+ ENABLE_INTERRUPTS(CLBR_NONE)
8133+ GET_THREAD_INFO(%r11)
8134+ orl $TS_COMPAT,TI_status(%r11)
8135+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8136 jnz ia32_tracesys
8137 cmpq $(IA32_NR_syscalls-1),%rax
8138 ja ia32_badsys
8139@@ -448,6 +502,9 @@ ia32_tracesys:
8140 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8141 movq %rsp,%rdi /* &pt_regs -> arg1 */
8142 call syscall_trace_enter
8143+
8144+ pax_erase_kstack
8145+
8146 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8147 RESTORE_REST
8148 cmpq $(IA32_NR_syscalls-1),%rax
8149@@ -462,6 +519,7 @@ ia32_badsys:
8150
8151 quiet_ni_syscall:
8152 movq $-ENOSYS,%rax
8153+ pax_force_retaddr
8154 ret
8155 CFI_ENDPROC
8156
8157diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8158index 016218c..47ccbdd 100644
8159--- a/arch/x86/ia32/sys_ia32.c
8160+++ b/arch/x86/ia32/sys_ia32.c
8161@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8162 */
8163 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8164 {
8165- typeof(ubuf->st_uid) uid = 0;
8166- typeof(ubuf->st_gid) gid = 0;
8167+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8168+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8169 SET_UID(uid, stat->uid);
8170 SET_GID(gid, stat->gid);
8171 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8172@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8173 }
8174 set_fs(KERNEL_DS);
8175 ret = sys_rt_sigprocmask(how,
8176- set ? (sigset_t __user *)&s : NULL,
8177- oset ? (sigset_t __user *)&s : NULL,
8178+ set ? (sigset_t __force_user *)&s : NULL,
8179+ oset ? (sigset_t __force_user *)&s : NULL,
8180 sigsetsize);
8181 set_fs(old_fs);
8182 if (ret)
8183@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8184 mm_segment_t old_fs = get_fs();
8185
8186 set_fs(KERNEL_DS);
8187- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8188+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8189 set_fs(old_fs);
8190 if (put_compat_timespec(&t, interval))
8191 return -EFAULT;
8192@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8193 mm_segment_t old_fs = get_fs();
8194
8195 set_fs(KERNEL_DS);
8196- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8197+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8198 set_fs(old_fs);
8199 if (!ret) {
8200 switch (_NSIG_WORDS) {
8201@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8202 if (copy_siginfo_from_user32(&info, uinfo))
8203 return -EFAULT;
8204 set_fs(KERNEL_DS);
8205- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8206+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8207 set_fs(old_fs);
8208 return ret;
8209 }
8210@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8211 return -EFAULT;
8212
8213 set_fs(KERNEL_DS);
8214- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8215+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8216 count);
8217 set_fs(old_fs);
8218
8219diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8220index e2077d3..e134a5e 100644
8221--- a/arch/x86/include/asm/alternative-asm.h
8222+++ b/arch/x86/include/asm/alternative-asm.h
8223@@ -19,4 +19,43 @@
8224 .endm
8225 #endif
8226
8227+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8228+ .macro pax_force_retaddr_bts rip=0
8229+ btsq $63,\rip(%rsp)
8230+ .endm
8231+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8232+ .macro pax_force_retaddr rip=0, reload=0
8233+ btsq $63,\rip(%rsp)
8234+ .endm
8235+ .macro pax_force_fptr ptr
8236+ btsq $63,\ptr
8237+ .endm
8238+ .macro pax_set_fptr_mask
8239+ .endm
8240+#endif
8241+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8242+ .macro pax_force_retaddr rip=0, reload=0
8243+ .if \reload
8244+ pax_set_fptr_mask
8245+ .endif
8246+ orq %r10,\rip(%rsp)
8247+ .endm
8248+ .macro pax_force_fptr ptr
8249+ orq %r10,\ptr
8250+ .endm
8251+ .macro pax_set_fptr_mask
8252+ movabs $0x8000000000000000,%r10
8253+ .endm
8254+#endif
8255+#else
8256+ .macro pax_force_retaddr rip=0, reload=0
8257+ .endm
8258+ .macro pax_force_fptr ptr
8259+ .endm
8260+ .macro pax_force_retaddr_bts rip=0
8261+ .endm
8262+ .macro pax_set_fptr_mask
8263+ .endm
8264+#endif
8265+
8266 #endif /* __ASSEMBLY__ */
8267diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8268index c240efc..fdfadf3 100644
8269--- a/arch/x86/include/asm/alternative.h
8270+++ b/arch/x86/include/asm/alternative.h
8271@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8272 " .byte 662b-661b\n" /* sourcelen */ \
8273 " .byte 664f-663f\n" /* replacementlen */ \
8274 ".previous\n" \
8275- ".section .altinstr_replacement, \"ax\"\n" \
8276+ ".section .altinstr_replacement, \"a\"\n" \
8277 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8278 ".previous"
8279
8280diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8281index 474d80d..1f97d58 100644
8282--- a/arch/x86/include/asm/apic.h
8283+++ b/arch/x86/include/asm/apic.h
8284@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8285
8286 #ifdef CONFIG_X86_LOCAL_APIC
8287
8288-extern unsigned int apic_verbosity;
8289+extern int apic_verbosity;
8290 extern int local_apic_timer_c2_ok;
8291
8292 extern int disable_apic;
8293diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8294index 20370c6..a2eb9b0 100644
8295--- a/arch/x86/include/asm/apm.h
8296+++ b/arch/x86/include/asm/apm.h
8297@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8298 __asm__ __volatile__(APM_DO_ZERO_SEGS
8299 "pushl %%edi\n\t"
8300 "pushl %%ebp\n\t"
8301- "lcall *%%cs:apm_bios_entry\n\t"
8302+ "lcall *%%ss:apm_bios_entry\n\t"
8303 "setc %%al\n\t"
8304 "popl %%ebp\n\t"
8305 "popl %%edi\n\t"
8306@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8307 __asm__ __volatile__(APM_DO_ZERO_SEGS
8308 "pushl %%edi\n\t"
8309 "pushl %%ebp\n\t"
8310- "lcall *%%cs:apm_bios_entry\n\t"
8311+ "lcall *%%ss:apm_bios_entry\n\t"
8312 "setc %%bl\n\t"
8313 "popl %%ebp\n\t"
8314 "popl %%edi\n\t"
8315diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8316index dc5a667..fbed878 100644
8317--- a/arch/x86/include/asm/atomic_32.h
8318+++ b/arch/x86/include/asm/atomic_32.h
8319@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8320 }
8321
8322 /**
8323+ * atomic_read_unchecked - read atomic variable
8324+ * @v: pointer of type atomic_unchecked_t
8325+ *
8326+ * Atomically reads the value of @v.
8327+ */
8328+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8329+{
8330+ return v->counter;
8331+}
8332+
8333+/**
8334 * atomic_set - set atomic variable
8335 * @v: pointer of type atomic_t
8336 * @i: required value
8337@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8338 }
8339
8340 /**
8341+ * atomic_set_unchecked - set atomic variable
8342+ * @v: pointer of type atomic_unchecked_t
8343+ * @i: required value
8344+ *
8345+ * Atomically sets the value of @v to @i.
8346+ */
8347+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8348+{
8349+ v->counter = i;
8350+}
8351+
8352+/**
8353 * atomic_add - add integer to atomic variable
8354 * @i: integer value to add
8355 * @v: pointer of type atomic_t
8356@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8357 */
8358 static inline void atomic_add(int i, atomic_t *v)
8359 {
8360- asm volatile(LOCK_PREFIX "addl %1,%0"
8361+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8362+
8363+#ifdef CONFIG_PAX_REFCOUNT
8364+ "jno 0f\n"
8365+ LOCK_PREFIX "subl %1,%0\n"
8366+ "int $4\n0:\n"
8367+ _ASM_EXTABLE(0b, 0b)
8368+#endif
8369+
8370+ : "+m" (v->counter)
8371+ : "ir" (i));
8372+}
8373+
8374+/**
8375+ * atomic_add_unchecked - add integer to atomic variable
8376+ * @i: integer value to add
8377+ * @v: pointer of type atomic_unchecked_t
8378+ *
8379+ * Atomically adds @i to @v.
8380+ */
8381+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8382+{
8383+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8384 : "+m" (v->counter)
8385 : "ir" (i));
8386 }
8387@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8388 */
8389 static inline void atomic_sub(int i, atomic_t *v)
8390 {
8391- asm volatile(LOCK_PREFIX "subl %1,%0"
8392+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8393+
8394+#ifdef CONFIG_PAX_REFCOUNT
8395+ "jno 0f\n"
8396+ LOCK_PREFIX "addl %1,%0\n"
8397+ "int $4\n0:\n"
8398+ _ASM_EXTABLE(0b, 0b)
8399+#endif
8400+
8401+ : "+m" (v->counter)
8402+ : "ir" (i));
8403+}
8404+
8405+/**
8406+ * atomic_sub_unchecked - subtract integer from atomic variable
8407+ * @i: integer value to subtract
8408+ * @v: pointer of type atomic_unchecked_t
8409+ *
8410+ * Atomically subtracts @i from @v.
8411+ */
8412+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8413+{
8414+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8415 : "+m" (v->counter)
8416 : "ir" (i));
8417 }
8418@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8419 {
8420 unsigned char c;
8421
8422- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8423+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8424+
8425+#ifdef CONFIG_PAX_REFCOUNT
8426+ "jno 0f\n"
8427+ LOCK_PREFIX "addl %2,%0\n"
8428+ "int $4\n0:\n"
8429+ _ASM_EXTABLE(0b, 0b)
8430+#endif
8431+
8432+ "sete %1\n"
8433 : "+m" (v->counter), "=qm" (c)
8434 : "ir" (i) : "memory");
8435 return c;
8436@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8437 */
8438 static inline void atomic_inc(atomic_t *v)
8439 {
8440- asm volatile(LOCK_PREFIX "incl %0"
8441+ asm volatile(LOCK_PREFIX "incl %0\n"
8442+
8443+#ifdef CONFIG_PAX_REFCOUNT
8444+ "jno 0f\n"
8445+ LOCK_PREFIX "decl %0\n"
8446+ "int $4\n0:\n"
8447+ _ASM_EXTABLE(0b, 0b)
8448+#endif
8449+
8450+ : "+m" (v->counter));
8451+}
8452+
8453+/**
8454+ * atomic_inc_unchecked - increment atomic variable
8455+ * @v: pointer of type atomic_unchecked_t
8456+ *
8457+ * Atomically increments @v by 1.
8458+ */
8459+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8460+{
8461+ asm volatile(LOCK_PREFIX "incl %0\n"
8462 : "+m" (v->counter));
8463 }
8464
8465@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8466 */
8467 static inline void atomic_dec(atomic_t *v)
8468 {
8469- asm volatile(LOCK_PREFIX "decl %0"
8470+ asm volatile(LOCK_PREFIX "decl %0\n"
8471+
8472+#ifdef CONFIG_PAX_REFCOUNT
8473+ "jno 0f\n"
8474+ LOCK_PREFIX "incl %0\n"
8475+ "int $4\n0:\n"
8476+ _ASM_EXTABLE(0b, 0b)
8477+#endif
8478+
8479+ : "+m" (v->counter));
8480+}
8481+
8482+/**
8483+ * atomic_dec_unchecked - decrement atomic variable
8484+ * @v: pointer of type atomic_unchecked_t
8485+ *
8486+ * Atomically decrements @v by 1.
8487+ */
8488+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8489+{
8490+ asm volatile(LOCK_PREFIX "decl %0\n"
8491 : "+m" (v->counter));
8492 }
8493
8494@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8495 {
8496 unsigned char c;
8497
8498- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8499+ asm volatile(LOCK_PREFIX "decl %0\n"
8500+
8501+#ifdef CONFIG_PAX_REFCOUNT
8502+ "jno 0f\n"
8503+ LOCK_PREFIX "incl %0\n"
8504+ "int $4\n0:\n"
8505+ _ASM_EXTABLE(0b, 0b)
8506+#endif
8507+
8508+ "sete %1\n"
8509 : "+m" (v->counter), "=qm" (c)
8510 : : "memory");
8511 return c != 0;
8512@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8513 {
8514 unsigned char c;
8515
8516- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8517+ asm volatile(LOCK_PREFIX "incl %0\n"
8518+
8519+#ifdef CONFIG_PAX_REFCOUNT
8520+ "jno 0f\n"
8521+ LOCK_PREFIX "decl %0\n"
8522+ "into\n0:\n"
8523+ _ASM_EXTABLE(0b, 0b)
8524+#endif
8525+
8526+ "sete %1\n"
8527+ : "+m" (v->counter), "=qm" (c)
8528+ : : "memory");
8529+ return c != 0;
8530+}
8531+
8532+/**
8533+ * atomic_inc_and_test_unchecked - increment and test
8534+ * @v: pointer of type atomic_unchecked_t
8535+ *
8536+ * Atomically increments @v by 1
8537+ * and returns true if the result is zero, or false for all
8538+ * other cases.
8539+ */
8540+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8541+{
8542+ unsigned char c;
8543+
8544+ asm volatile(LOCK_PREFIX "incl %0\n"
8545+ "sete %1\n"
8546 : "+m" (v->counter), "=qm" (c)
8547 : : "memory");
8548 return c != 0;
8549@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8550 {
8551 unsigned char c;
8552
8553- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8554+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8555+
8556+#ifdef CONFIG_PAX_REFCOUNT
8557+ "jno 0f\n"
8558+ LOCK_PREFIX "subl %2,%0\n"
8559+ "int $4\n0:\n"
8560+ _ASM_EXTABLE(0b, 0b)
8561+#endif
8562+
8563+ "sets %1\n"
8564 : "+m" (v->counter), "=qm" (c)
8565 : "ir" (i) : "memory");
8566 return c;
8567@@ -179,6 +341,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
8568 #endif
8569 /* Modern 486+ processor */
8570 __i = i;
8571+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8572+
8573+#ifdef CONFIG_PAX_REFCOUNT
8574+ "jno 0f\n"
8575+ "movl %0, %1\n"
8576+ "int $4\n0:\n"
8577+ _ASM_EXTABLE(0b, 0b)
8578+#endif
8579+
8580+ : "+r" (i), "+m" (v->counter)
8581+ : : "memory");
8582+ return i + __i;
8583+
8584+#ifdef CONFIG_M386
8585+no_xadd: /* Legacy 386 processor */
8586+ local_irq_save(flags);
8587+ __i = atomic_read(v);
8588+ atomic_set(v, i + __i);
8589+ local_irq_restore(flags);
8590+ return i + __i;
8591+#endif
8592+}
8593+
8594+/**
8595+ * atomic_add_return_unchecked - add integer and return
8596+ * @v: pointer of type atomic_unchecked_t
8597+ * @i: integer value to add
8598+ *
8599+ * Atomically adds @i to @v and returns @i + @v
8600+ */
8601+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8602+{
8603+ int __i;
8604+#ifdef CONFIG_M386
8605+ unsigned long flags;
8606+ if (unlikely(boot_cpu_data.x86 <= 3))
8607+ goto no_xadd;
8608+#endif
8609+ /* Modern 486+ processor */
8610+ __i = i;
8611 asm volatile(LOCK_PREFIX "xaddl %0, %1"
8612 : "+r" (i), "+m" (v->counter)
8613 : : "memory");
8614@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8615 return cmpxchg(&v->counter, old, new);
8616 }
8617
8618+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8619+{
8620+ return cmpxchg(&v->counter, old, new);
8621+}
8622+
8623 static inline int atomic_xchg(atomic_t *v, int new)
8624 {
8625 return xchg(&v->counter, new);
8626 }
8627
8628+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8629+{
8630+ return xchg(&v->counter, new);
8631+}
8632+
8633 /**
8634 * atomic_add_unless - add unless the number is already a given value
8635 * @v: pointer of type atomic_t
8636@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8637 */
8638 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8639 {
8640- int c, old;
8641+ int c, old, new;
8642 c = atomic_read(v);
8643 for (;;) {
8644- if (unlikely(c == (u)))
8645+ if (unlikely(c == u))
8646 break;
8647- old = atomic_cmpxchg((v), c, c + (a));
8648+
8649+ asm volatile("addl %2,%0\n"
8650+
8651+#ifdef CONFIG_PAX_REFCOUNT
8652+ "jno 0f\n"
8653+ "subl %2,%0\n"
8654+ "int $4\n0:\n"
8655+ _ASM_EXTABLE(0b, 0b)
8656+#endif
8657+
8658+ : "=r" (new)
8659+ : "0" (c), "ir" (a));
8660+
8661+ old = atomic_cmpxchg(v, c, new);
8662 if (likely(old == c))
8663 break;
8664 c = old;
8665 }
8666- return c != (u);
8667+ return c != u;
8668 }
8669
8670 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8671
8672 #define atomic_inc_return(v) (atomic_add_return(1, v))
8673+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8674+{
8675+ return atomic_add_return_unchecked(1, v);
8676+}
8677 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8678
8679 /* These are x86-specific, used by some header files */
8680@@ -266,9 +495,18 @@ typedef struct {
8681 u64 __aligned(8) counter;
8682 } atomic64_t;
8683
8684+#ifdef CONFIG_PAX_REFCOUNT
8685+typedef struct {
8686+ u64 __aligned(8) counter;
8687+} atomic64_unchecked_t;
8688+#else
8689+typedef atomic64_t atomic64_unchecked_t;
8690+#endif
8691+
8692 #define ATOMIC64_INIT(val) { (val) }
8693
8694 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8695+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8696
8697 /**
8698 * atomic64_xchg - xchg atomic64 variable
8699@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8700 * the old value.
8701 */
8702 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8703+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8704
8705 /**
8706 * atomic64_set - set atomic64 variable
8707@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8708 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8709
8710 /**
8711+ * atomic64_unchecked_set - set atomic64 variable
8712+ * @ptr: pointer to type atomic64_unchecked_t
8713+ * @new_val: value to assign
8714+ *
8715+ * Atomically sets the value of @ptr to @new_val.
8716+ */
8717+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8718+
8719+/**
8720 * atomic64_read - read atomic64 variable
8721 * @ptr: pointer to type atomic64_t
8722 *
8723@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8724 return res;
8725 }
8726
8727-extern u64 atomic64_read(atomic64_t *ptr);
8728+/**
8729+ * atomic64_read_unchecked - read atomic64 variable
8730+ * @ptr: pointer to type atomic64_unchecked_t
8731+ *
8732+ * Atomically reads the value of @ptr and returns it.
8733+ */
8734+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8735+{
8736+ u64 res;
8737+
8738+ /*
8739+ * Note, we inline this atomic64_unchecked_t primitive because
8740+ * it only clobbers EAX/EDX and leaves the others
8741+ * untouched. We also (somewhat subtly) rely on the
8742+ * fact that cmpxchg8b returns the current 64-bit value
8743+ * of the memory location we are touching:
8744+ */
8745+ asm volatile(
8746+ "mov %%ebx, %%eax\n\t"
8747+ "mov %%ecx, %%edx\n\t"
8748+ LOCK_PREFIX "cmpxchg8b %1\n"
8749+ : "=&A" (res)
8750+ : "m" (*ptr)
8751+ );
8752+
8753+ return res;
8754+}
8755
8756 /**
8757 * atomic64_add_return - add and return
8758@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8759 * Other variants with different arithmetic operators:
8760 */
8761 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8762+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8763 extern u64 atomic64_inc_return(atomic64_t *ptr);
8764+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8765 extern u64 atomic64_dec_return(atomic64_t *ptr);
8766+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8767
8768 /**
8769 * atomic64_add - add integer to atomic64 variable
8770@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8771 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8772
8773 /**
8774+ * atomic64_add_unchecked - add integer to atomic64 variable
8775+ * @delta: integer value to add
8776+ * @ptr: pointer to type atomic64_unchecked_t
8777+ *
8778+ * Atomically adds @delta to @ptr.
8779+ */
8780+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8781+
8782+/**
8783 * atomic64_sub - subtract the atomic64 variable
8784 * @delta: integer value to subtract
8785 * @ptr: pointer to type atomic64_t
8786@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8787 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8788
8789 /**
8790+ * atomic64_sub_unchecked - subtract the atomic64 variable
8791+ * @delta: integer value to subtract
8792+ * @ptr: pointer to type atomic64_unchecked_t
8793+ *
8794+ * Atomically subtracts @delta from @ptr.
8795+ */
8796+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8797+
8798+/**
8799 * atomic64_sub_and_test - subtract value from variable and test result
8800 * @delta: integer value to subtract
8801 * @ptr: pointer to type atomic64_t
8802@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8803 extern void atomic64_inc(atomic64_t *ptr);
8804
8805 /**
8806+ * atomic64_inc_unchecked - increment atomic64 variable
8807+ * @ptr: pointer to type atomic64_unchecked_t
8808+ *
8809+ * Atomically increments @ptr by 1.
8810+ */
8811+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8812+
8813+/**
8814 * atomic64_dec - decrement atomic64 variable
8815 * @ptr: pointer to type atomic64_t
8816 *
8817@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8818 extern void atomic64_dec(atomic64_t *ptr);
8819
8820 /**
8821+ * atomic64_dec_unchecked - decrement atomic64 variable
8822+ * @ptr: pointer to type atomic64_unchecked_t
8823+ *
8824+ * Atomically decrements @ptr by 1.
8825+ */
8826+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8827+
8828+/**
8829 * atomic64_dec_and_test - decrement and test
8830 * @ptr: pointer to type atomic64_t
8831 *
8832diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8833index d605dc2..fafd7bd 100644
8834--- a/arch/x86/include/asm/atomic_64.h
8835+++ b/arch/x86/include/asm/atomic_64.h
8836@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8837 }
8838
8839 /**
8840+ * atomic_read_unchecked - read atomic variable
8841+ * @v: pointer of type atomic_unchecked_t
8842+ *
8843+ * Atomically reads the value of @v.
8844+ */
8845+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8846+{
8847+ return v->counter;
8848+}
8849+
8850+/**
8851 * atomic_set - set atomic variable
8852 * @v: pointer of type atomic_t
8853 * @i: required value
8854@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8855 }
8856
8857 /**
8858+ * atomic_set_unchecked - set atomic variable
8859+ * @v: pointer of type atomic_unchecked_t
8860+ * @i: required value
8861+ *
8862+ * Atomically sets the value of @v to @i.
8863+ */
8864+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8865+{
8866+ v->counter = i;
8867+}
8868+
8869+/**
8870 * atomic_add - add integer to atomic variable
8871 * @i: integer value to add
8872 * @v: pointer of type atomic_t
8873@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8874 */
8875 static inline void atomic_add(int i, atomic_t *v)
8876 {
8877- asm volatile(LOCK_PREFIX "addl %1,%0"
8878+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8879+
8880+#ifdef CONFIG_PAX_REFCOUNT
8881+ "jno 0f\n"
8882+ LOCK_PREFIX "subl %1,%0\n"
8883+ "int $4\n0:\n"
8884+ _ASM_EXTABLE(0b, 0b)
8885+#endif
8886+
8887+ : "=m" (v->counter)
8888+ : "ir" (i), "m" (v->counter));
8889+}
8890+
8891+/**
8892+ * atomic_add_unchecked - add integer to atomic variable
8893+ * @i: integer value to add
8894+ * @v: pointer of type atomic_unchecked_t
8895+ *
8896+ * Atomically adds @i to @v.
8897+ */
8898+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8899+{
8900+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8901 : "=m" (v->counter)
8902 : "ir" (i), "m" (v->counter));
8903 }
8904@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8905 */
8906 static inline void atomic_sub(int i, atomic_t *v)
8907 {
8908- asm volatile(LOCK_PREFIX "subl %1,%0"
8909+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8910+
8911+#ifdef CONFIG_PAX_REFCOUNT
8912+ "jno 0f\n"
8913+ LOCK_PREFIX "addl %1,%0\n"
8914+ "int $4\n0:\n"
8915+ _ASM_EXTABLE(0b, 0b)
8916+#endif
8917+
8918+ : "=m" (v->counter)
8919+ : "ir" (i), "m" (v->counter));
8920+}
8921+
8922+/**
8923+ * atomic_sub_unchecked - subtract the atomic variable
8924+ * @i: integer value to subtract
8925+ * @v: pointer of type atomic_unchecked_t
8926+ *
8927+ * Atomically subtracts @i from @v.
8928+ */
8929+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8930+{
8931+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8932 : "=m" (v->counter)
8933 : "ir" (i), "m" (v->counter));
8934 }
8935@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8936 {
8937 unsigned char c;
8938
8939- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8940+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8941+
8942+#ifdef CONFIG_PAX_REFCOUNT
8943+ "jno 0f\n"
8944+ LOCK_PREFIX "addl %2,%0\n"
8945+ "int $4\n0:\n"
8946+ _ASM_EXTABLE(0b, 0b)
8947+#endif
8948+
8949+ "sete %1\n"
8950 : "=m" (v->counter), "=qm" (c)
8951 : "ir" (i), "m" (v->counter) : "memory");
8952 return c;
8953@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8954 */
8955 static inline void atomic_inc(atomic_t *v)
8956 {
8957- asm volatile(LOCK_PREFIX "incl %0"
8958+ asm volatile(LOCK_PREFIX "incl %0\n"
8959+
8960+#ifdef CONFIG_PAX_REFCOUNT
8961+ "jno 0f\n"
8962+ LOCK_PREFIX "decl %0\n"
8963+ "int $4\n0:\n"
8964+ _ASM_EXTABLE(0b, 0b)
8965+#endif
8966+
8967+ : "=m" (v->counter)
8968+ : "m" (v->counter));
8969+}
8970+
8971+/**
8972+ * atomic_inc_unchecked - increment atomic variable
8973+ * @v: pointer of type atomic_unchecked_t
8974+ *
8975+ * Atomically increments @v by 1.
8976+ */
8977+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8978+{
8979+ asm volatile(LOCK_PREFIX "incl %0\n"
8980 : "=m" (v->counter)
8981 : "m" (v->counter));
8982 }
8983@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
8984 */
8985 static inline void atomic_dec(atomic_t *v)
8986 {
8987- asm volatile(LOCK_PREFIX "decl %0"
8988+ asm volatile(LOCK_PREFIX "decl %0\n"
8989+
8990+#ifdef CONFIG_PAX_REFCOUNT
8991+ "jno 0f\n"
8992+ LOCK_PREFIX "incl %0\n"
8993+ "int $4\n0:\n"
8994+ _ASM_EXTABLE(0b, 0b)
8995+#endif
8996+
8997+ : "=m" (v->counter)
8998+ : "m" (v->counter));
8999+}
9000+
9001+/**
9002+ * atomic_dec_unchecked - decrement atomic variable
9003+ * @v: pointer of type atomic_unchecked_t
9004+ *
9005+ * Atomically decrements @v by 1.
9006+ */
9007+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9008+{
9009+ asm volatile(LOCK_PREFIX "decl %0\n"
9010 : "=m" (v->counter)
9011 : "m" (v->counter));
9012 }
9013@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9014 {
9015 unsigned char c;
9016
9017- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9018+ asm volatile(LOCK_PREFIX "decl %0\n"
9019+
9020+#ifdef CONFIG_PAX_REFCOUNT
9021+ "jno 0f\n"
9022+ LOCK_PREFIX "incl %0\n"
9023+ "int $4\n0:\n"
9024+ _ASM_EXTABLE(0b, 0b)
9025+#endif
9026+
9027+ "sete %1\n"
9028 : "=m" (v->counter), "=qm" (c)
9029 : "m" (v->counter) : "memory");
9030 return c != 0;
9031@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9032 {
9033 unsigned char c;
9034
9035- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9036+ asm volatile(LOCK_PREFIX "incl %0\n"
9037+
9038+#ifdef CONFIG_PAX_REFCOUNT
9039+ "jno 0f\n"
9040+ LOCK_PREFIX "decl %0\n"
9041+ "int $4\n0:\n"
9042+ _ASM_EXTABLE(0b, 0b)
9043+#endif
9044+
9045+ "sete %1\n"
9046+ : "=m" (v->counter), "=qm" (c)
9047+ : "m" (v->counter) : "memory");
9048+ return c != 0;
9049+}
9050+
9051+/**
9052+ * atomic_inc_and_test_unchecked - increment and test
9053+ * @v: pointer of type atomic_unchecked_t
9054+ *
9055+ * Atomically increments @v by 1
9056+ * and returns true if the result is zero, or false for all
9057+ * other cases.
9058+ */
9059+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9060+{
9061+ unsigned char c;
9062+
9063+ asm volatile(LOCK_PREFIX "incl %0\n"
9064+ "sete %1\n"
9065 : "=m" (v->counter), "=qm" (c)
9066 : "m" (v->counter) : "memory");
9067 return c != 0;
9068@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9069 {
9070 unsigned char c;
9071
9072- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9073+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9074+
9075+#ifdef CONFIG_PAX_REFCOUNT
9076+ "jno 0f\n"
9077+ LOCK_PREFIX "subl %2,%0\n"
9078+ "int $4\n0:\n"
9079+ _ASM_EXTABLE(0b, 0b)
9080+#endif
9081+
9082+ "sets %1\n"
9083 : "=m" (v->counter), "=qm" (c)
9084 : "ir" (i), "m" (v->counter) : "memory");
9085 return c;
9086@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9087 static inline int atomic_add_return(int i, atomic_t *v)
9088 {
9089 int __i = i;
9090- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9091+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9092+
9093+#ifdef CONFIG_PAX_REFCOUNT
9094+ "jno 0f\n"
9095+ "movl %0, %1\n"
9096+ "int $4\n0:\n"
9097+ _ASM_EXTABLE(0b, 0b)
9098+#endif
9099+
9100+ : "+r" (i), "+m" (v->counter)
9101+ : : "memory");
9102+ return i + __i;
9103+}
9104+
9105+/**
9106+ * atomic_add_return_unchecked - add and return
9107+ * @i: integer value to add
9108+ * @v: pointer of type atomic_unchecked_t
9109+ *
9110+ * Atomically adds @i to @v and returns @i + @v
9111+ */
9112+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9113+{
9114+ int __i = i;
9115+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9116 : "+r" (i), "+m" (v->counter)
9117 : : "memory");
9118 return i + __i;
9119@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9120 }
9121
9122 #define atomic_inc_return(v) (atomic_add_return(1, v))
9123+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9124+{
9125+ return atomic_add_return_unchecked(1, v);
9126+}
9127 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9128
9129 /* The 64-bit atomic type */
9130@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9131 }
9132
9133 /**
9134+ * atomic64_read_unchecked - read atomic64 variable
9135+ * @v: pointer of type atomic64_unchecked_t
9136+ *
9137+ * Atomically reads the value of @v.
9138+ * Doesn't imply a read memory barrier.
9139+ */
9140+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9141+{
9142+ return v->counter;
9143+}
9144+
9145+/**
9146 * atomic64_set - set atomic64 variable
9147 * @v: pointer to type atomic64_t
9148 * @i: required value
9149@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9150 }
9151
9152 /**
9153+ * atomic64_set_unchecked - set atomic64 variable
9154+ * @v: pointer to type atomic64_unchecked_t
9155+ * @i: required value
9156+ *
9157+ * Atomically sets the value of @v to @i.
9158+ */
9159+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9160+{
9161+ v->counter = i;
9162+}
9163+
9164+/**
9165 * atomic64_add - add integer to atomic64 variable
9166 * @i: integer value to add
9167 * @v: pointer to type atomic64_t
9168@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9169 */
9170 static inline void atomic64_add(long i, atomic64_t *v)
9171 {
9172+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9173+
9174+#ifdef CONFIG_PAX_REFCOUNT
9175+ "jno 0f\n"
9176+ LOCK_PREFIX "subq %1,%0\n"
9177+ "int $4\n0:\n"
9178+ _ASM_EXTABLE(0b, 0b)
9179+#endif
9180+
9181+ : "=m" (v->counter)
9182+ : "er" (i), "m" (v->counter));
9183+}
9184+
9185+/**
9186+ * atomic64_add_unchecked - add integer to atomic64 variable
9187+ * @i: integer value to add
9188+ * @v: pointer to type atomic64_unchecked_t
9189+ *
9190+ * Atomically adds @i to @v.
9191+ */
9192+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9193+{
9194 asm volatile(LOCK_PREFIX "addq %1,%0"
9195 : "=m" (v->counter)
9196 : "er" (i), "m" (v->counter));
9197@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9198 */
9199 static inline void atomic64_sub(long i, atomic64_t *v)
9200 {
9201- asm volatile(LOCK_PREFIX "subq %1,%0"
9202+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9203+
9204+#ifdef CONFIG_PAX_REFCOUNT
9205+ "jno 0f\n"
9206+ LOCK_PREFIX "addq %1,%0\n"
9207+ "int $4\n0:\n"
9208+ _ASM_EXTABLE(0b, 0b)
9209+#endif
9210+
9211 : "=m" (v->counter)
9212 : "er" (i), "m" (v->counter));
9213 }
9214@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9215 {
9216 unsigned char c;
9217
9218- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9219+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9220+
9221+#ifdef CONFIG_PAX_REFCOUNT
9222+ "jno 0f\n"
9223+ LOCK_PREFIX "addq %2,%0\n"
9224+ "int $4\n0:\n"
9225+ _ASM_EXTABLE(0b, 0b)
9226+#endif
9227+
9228+ "sete %1\n"
9229 : "=m" (v->counter), "=qm" (c)
9230 : "er" (i), "m" (v->counter) : "memory");
9231 return c;
9232@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9233 */
9234 static inline void atomic64_inc(atomic64_t *v)
9235 {
9236+ asm volatile(LOCK_PREFIX "incq %0\n"
9237+
9238+#ifdef CONFIG_PAX_REFCOUNT
9239+ "jno 0f\n"
9240+ LOCK_PREFIX "decq %0\n"
9241+ "int $4\n0:\n"
9242+ _ASM_EXTABLE(0b, 0b)
9243+#endif
9244+
9245+ : "=m" (v->counter)
9246+ : "m" (v->counter));
9247+}
9248+
9249+/**
9250+ * atomic64_inc_unchecked - increment atomic64 variable
9251+ * @v: pointer to type atomic64_unchecked_t
9252+ *
9253+ * Atomically increments @v by 1.
9254+ */
9255+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9256+{
9257 asm volatile(LOCK_PREFIX "incq %0"
9258 : "=m" (v->counter)
9259 : "m" (v->counter));
9260@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9261 */
9262 static inline void atomic64_dec(atomic64_t *v)
9263 {
9264- asm volatile(LOCK_PREFIX "decq %0"
9265+ asm volatile(LOCK_PREFIX "decq %0\n"
9266+
9267+#ifdef CONFIG_PAX_REFCOUNT
9268+ "jno 0f\n"
9269+ LOCK_PREFIX "incq %0\n"
9270+ "int $4\n0:\n"
9271+ _ASM_EXTABLE(0b, 0b)
9272+#endif
9273+
9274+ : "=m" (v->counter)
9275+ : "m" (v->counter));
9276+}
9277+
9278+/**
9279+ * atomic64_dec_unchecked - decrement atomic64 variable
9280+ * @v: pointer to type atomic64_t
9281+ *
9282+ * Atomically decrements @v by 1.
9283+ */
9284+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9285+{
9286+ asm volatile(LOCK_PREFIX "decq %0\n"
9287 : "=m" (v->counter)
9288 : "m" (v->counter));
9289 }
9290@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9291 {
9292 unsigned char c;
9293
9294- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9295+ asm volatile(LOCK_PREFIX "decq %0\n"
9296+
9297+#ifdef CONFIG_PAX_REFCOUNT
9298+ "jno 0f\n"
9299+ LOCK_PREFIX "incq %0\n"
9300+ "int $4\n0:\n"
9301+ _ASM_EXTABLE(0b, 0b)
9302+#endif
9303+
9304+ "sete %1\n"
9305 : "=m" (v->counter), "=qm" (c)
9306 : "m" (v->counter) : "memory");
9307 return c != 0;
9308@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9309 {
9310 unsigned char c;
9311
9312- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9313+ asm volatile(LOCK_PREFIX "incq %0\n"
9314+
9315+#ifdef CONFIG_PAX_REFCOUNT
9316+ "jno 0f\n"
9317+ LOCK_PREFIX "decq %0\n"
9318+ "int $4\n0:\n"
9319+ _ASM_EXTABLE(0b, 0b)
9320+#endif
9321+
9322+ "sete %1\n"
9323 : "=m" (v->counter), "=qm" (c)
9324 : "m" (v->counter) : "memory");
9325 return c != 0;
9326@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9327 {
9328 unsigned char c;
9329
9330- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9331+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9332+
9333+#ifdef CONFIG_PAX_REFCOUNT
9334+ "jno 0f\n"
9335+ LOCK_PREFIX "subq %2,%0\n"
9336+ "int $4\n0:\n"
9337+ _ASM_EXTABLE(0b, 0b)
9338+#endif
9339+
9340+ "sets %1\n"
9341 : "=m" (v->counter), "=qm" (c)
9342 : "er" (i), "m" (v->counter) : "memory");
9343 return c;
9344@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9345 static inline long atomic64_add_return(long i, atomic64_t *v)
9346 {
9347 long __i = i;
9348- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9349+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9350+
9351+#ifdef CONFIG_PAX_REFCOUNT
9352+ "jno 0f\n"
9353+ "movq %0, %1\n"
9354+ "int $4\n0:\n"
9355+ _ASM_EXTABLE(0b, 0b)
9356+#endif
9357+
9358+ : "+r" (i), "+m" (v->counter)
9359+ : : "memory");
9360+ return i + __i;
9361+}
9362+
9363+/**
9364+ * atomic64_add_return_unchecked - add and return
9365+ * @i: integer value to add
9366+ * @v: pointer to type atomic64_unchecked_t
9367+ *
9368+ * Atomically adds @i to @v and returns @i + @v
9369+ */
9370+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9371+{
9372+ long __i = i;
9373+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9374 : "+r" (i), "+m" (v->counter)
9375 : : "memory");
9376 return i + __i;
9377@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9378 }
9379
9380 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9381+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9382+{
9383+ return atomic64_add_return_unchecked(1, v);
9384+}
9385 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9386
9387 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9388@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9389 return cmpxchg(&v->counter, old, new);
9390 }
9391
9392+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9393+{
9394+ return cmpxchg(&v->counter, old, new);
9395+}
9396+
9397 static inline long atomic64_xchg(atomic64_t *v, long new)
9398 {
9399 return xchg(&v->counter, new);
9400 }
9401
9402+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9403+{
9404+ return xchg(&v->counter, new);
9405+}
9406+
9407 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9408 {
9409 return cmpxchg(&v->counter, old, new);
9410 }
9411
9412+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9413+{
9414+ return cmpxchg(&v->counter, old, new);
9415+}
9416+
9417 static inline long atomic_xchg(atomic_t *v, int new)
9418 {
9419 return xchg(&v->counter, new);
9420 }
9421
9422+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9423+{
9424+ return xchg(&v->counter, new);
9425+}
9426+
9427 /**
9428 * atomic_add_unless - add unless the number is a given value
9429 * @v: pointer of type atomic_t
9430@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9431 */
9432 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9433 {
9434- int c, old;
9435+ int c, old, new;
9436 c = atomic_read(v);
9437 for (;;) {
9438- if (unlikely(c == (u)))
9439+ if (unlikely(c == u))
9440 break;
9441- old = atomic_cmpxchg((v), c, c + (a));
9442+
9443+ asm volatile("addl %2,%0\n"
9444+
9445+#ifdef CONFIG_PAX_REFCOUNT
9446+ "jno 0f\n"
9447+ "subl %2,%0\n"
9448+ "int $4\n0:\n"
9449+ _ASM_EXTABLE(0b, 0b)
9450+#endif
9451+
9452+ : "=r" (new)
9453+ : "0" (c), "ir" (a));
9454+
9455+ old = atomic_cmpxchg(v, c, new);
9456 if (likely(old == c))
9457 break;
9458 c = old;
9459 }
9460- return c != (u);
9461+ return c != u;
9462 }
9463
9464 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9465@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9466 */
9467 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9468 {
9469- long c, old;
9470+ long c, old, new;
9471 c = atomic64_read(v);
9472 for (;;) {
9473- if (unlikely(c == (u)))
9474+ if (unlikely(c == u))
9475 break;
9476- old = atomic64_cmpxchg((v), c, c + (a));
9477+
9478+ asm volatile("addq %2,%0\n"
9479+
9480+#ifdef CONFIG_PAX_REFCOUNT
9481+ "jno 0f\n"
9482+ "subq %2,%0\n"
9483+ "int $4\n0:\n"
9484+ _ASM_EXTABLE(0b, 0b)
9485+#endif
9486+
9487+ : "=r" (new)
9488+ : "0" (c), "er" (a));
9489+
9490+ old = atomic64_cmpxchg(v, c, new);
9491 if (likely(old == c))
9492 break;
9493 c = old;
9494 }
9495- return c != (u);
9496+ return c != u;
9497 }
9498
9499 /**
9500diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9501index 02b47a6..d5c4b15 100644
9502--- a/arch/x86/include/asm/bitops.h
9503+++ b/arch/x86/include/asm/bitops.h
9504@@ -38,7 +38,7 @@
9505 * a mask operation on a byte.
9506 */
9507 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9508-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9509+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9510 #define CONST_MASK(nr) (1 << ((nr) & 7))
9511
9512 /**
9513diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9514index 7a10659..8bbf355 100644
9515--- a/arch/x86/include/asm/boot.h
9516+++ b/arch/x86/include/asm/boot.h
9517@@ -11,10 +11,15 @@
9518 #include <asm/pgtable_types.h>
9519
9520 /* Physical address where kernel should be loaded. */
9521-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9522+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9523 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9524 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9525
9526+#ifndef __ASSEMBLY__
9527+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9528+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9529+#endif
9530+
9531 /* Minimum kernel alignment, as a power of two */
9532 #ifdef CONFIG_X86_64
9533 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9534diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9535index 549860d..7d45f68 100644
9536--- a/arch/x86/include/asm/cache.h
9537+++ b/arch/x86/include/asm/cache.h
9538@@ -5,9 +5,10 @@
9539
9540 /* L1 cache line size */
9541 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9542-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9543+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9544
9545 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9546+#define __read_only __attribute__((__section__(".data.read_only")))
9547
9548 #ifdef CONFIG_X86_VSMP
9549 /* vSMP Internode cacheline shift */
9550diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9551index b54f6af..5b376a6 100644
9552--- a/arch/x86/include/asm/cacheflush.h
9553+++ b/arch/x86/include/asm/cacheflush.h
9554@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9555 static inline unsigned long get_page_memtype(struct page *pg)
9556 {
9557 if (!PageUncached(pg) && !PageWC(pg))
9558- return -1;
9559+ return ~0UL;
9560 else if (!PageUncached(pg) && PageWC(pg))
9561 return _PAGE_CACHE_WC;
9562 else if (PageUncached(pg) && !PageWC(pg))
9563@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9564 SetPageWC(pg);
9565 break;
9566 default:
9567- case -1:
9568+ case ~0UL:
9569 ClearPageUncached(pg);
9570 ClearPageWC(pg);
9571 break;
9572diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9573index 0e63c9a..ab8d972 100644
9574--- a/arch/x86/include/asm/calling.h
9575+++ b/arch/x86/include/asm/calling.h
9576@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9577 * for assembly code:
9578 */
9579
9580-#define R15 0
9581-#define R14 8
9582-#define R13 16
9583-#define R12 24
9584-#define RBP 32
9585-#define RBX 40
9586+#define R15 (0)
9587+#define R14 (8)
9588+#define R13 (16)
9589+#define R12 (24)
9590+#define RBP (32)
9591+#define RBX (40)
9592
9593 /* arguments: interrupts/non tracing syscalls only save up to here: */
9594-#define R11 48
9595-#define R10 56
9596-#define R9 64
9597-#define R8 72
9598-#define RAX 80
9599-#define RCX 88
9600-#define RDX 96
9601-#define RSI 104
9602-#define RDI 112
9603-#define ORIG_RAX 120 /* + error_code */
9604+#define R11 (48)
9605+#define R10 (56)
9606+#define R9 (64)
9607+#define R8 (72)
9608+#define RAX (80)
9609+#define RCX (88)
9610+#define RDX (96)
9611+#define RSI (104)
9612+#define RDI (112)
9613+#define ORIG_RAX (120) /* + error_code */
9614 /* end of arguments */
9615
9616 /* cpu exception frame or undefined in case of fast syscall: */
9617-#define RIP 128
9618-#define CS 136
9619-#define EFLAGS 144
9620-#define RSP 152
9621-#define SS 160
9622+#define RIP (128)
9623+#define CS (136)
9624+#define EFLAGS (144)
9625+#define RSP (152)
9626+#define SS (160)
9627
9628 #define ARGOFFSET R11
9629 #define SWFRAME ORIG_RAX
9630diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9631index 46fc474..b02b0f9 100644
9632--- a/arch/x86/include/asm/checksum_32.h
9633+++ b/arch/x86/include/asm/checksum_32.h
9634@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9635 int len, __wsum sum,
9636 int *src_err_ptr, int *dst_err_ptr);
9637
9638+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9639+ int len, __wsum sum,
9640+ int *src_err_ptr, int *dst_err_ptr);
9641+
9642+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9643+ int len, __wsum sum,
9644+ int *src_err_ptr, int *dst_err_ptr);
9645+
9646 /*
9647 * Note: when you get a NULL pointer exception here this means someone
9648 * passed in an incorrect kernel address to one of these functions.
9649@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9650 int *err_ptr)
9651 {
9652 might_sleep();
9653- return csum_partial_copy_generic((__force void *)src, dst,
9654+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9655 len, sum, err_ptr, NULL);
9656 }
9657
9658@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9659 {
9660 might_sleep();
9661 if (access_ok(VERIFY_WRITE, dst, len))
9662- return csum_partial_copy_generic(src, (__force void *)dst,
9663+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9664 len, sum, NULL, err_ptr);
9665
9666 if (len)
9667diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9668index 617bd56..7b047a1 100644
9669--- a/arch/x86/include/asm/desc.h
9670+++ b/arch/x86/include/asm/desc.h
9671@@ -4,6 +4,7 @@
9672 #include <asm/desc_defs.h>
9673 #include <asm/ldt.h>
9674 #include <asm/mmu.h>
9675+#include <asm/pgtable.h>
9676 #include <linux/smp.h>
9677
9678 static inline void fill_ldt(struct desc_struct *desc,
9679@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9680 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9681 desc->type = (info->read_exec_only ^ 1) << 1;
9682 desc->type |= info->contents << 2;
9683+ desc->type |= info->seg_not_present ^ 1;
9684 desc->s = 1;
9685 desc->dpl = 0x3;
9686 desc->p = info->seg_not_present ^ 1;
9687@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9688 }
9689
9690 extern struct desc_ptr idt_descr;
9691-extern gate_desc idt_table[];
9692-
9693-struct gdt_page {
9694- struct desc_struct gdt[GDT_ENTRIES];
9695-} __attribute__((aligned(PAGE_SIZE)));
9696-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9697+extern gate_desc idt_table[256];
9698
9699+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9700 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9701 {
9702- return per_cpu(gdt_page, cpu).gdt;
9703+ return cpu_gdt_table[cpu];
9704 }
9705
9706 #ifdef CONFIG_X86_64
9707@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9708 unsigned long base, unsigned dpl, unsigned flags,
9709 unsigned short seg)
9710 {
9711- gate->a = (seg << 16) | (base & 0xffff);
9712- gate->b = (base & 0xffff0000) |
9713- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9714+ gate->gate.offset_low = base;
9715+ gate->gate.seg = seg;
9716+ gate->gate.reserved = 0;
9717+ gate->gate.type = type;
9718+ gate->gate.s = 0;
9719+ gate->gate.dpl = dpl;
9720+ gate->gate.p = 1;
9721+ gate->gate.offset_high = base >> 16;
9722 }
9723
9724 #endif
9725@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9726 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9727 const gate_desc *gate)
9728 {
9729+ pax_open_kernel();
9730 memcpy(&idt[entry], gate, sizeof(*gate));
9731+ pax_close_kernel();
9732 }
9733
9734 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9735 const void *desc)
9736 {
9737+ pax_open_kernel();
9738 memcpy(&ldt[entry], desc, 8);
9739+ pax_close_kernel();
9740 }
9741
9742 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9743@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9744 size = sizeof(struct desc_struct);
9745 break;
9746 }
9747+
9748+ pax_open_kernel();
9749 memcpy(&gdt[entry], desc, size);
9750+ pax_close_kernel();
9751 }
9752
9753 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9754@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9755
9756 static inline void native_load_tr_desc(void)
9757 {
9758+ pax_open_kernel();
9759 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9760+ pax_close_kernel();
9761 }
9762
9763 static inline void native_load_gdt(const struct desc_ptr *dtr)
9764@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9765 unsigned int i;
9766 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9767
9768+ pax_open_kernel();
9769 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9770 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9771+ pax_close_kernel();
9772 }
9773
9774 #define _LDT_empty(info) \
9775@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9776 desc->limit = (limit >> 16) & 0xf;
9777 }
9778
9779-static inline void _set_gate(int gate, unsigned type, void *addr,
9780+static inline void _set_gate(int gate, unsigned type, const void *addr,
9781 unsigned dpl, unsigned ist, unsigned seg)
9782 {
9783 gate_desc s;
9784@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9785 * Pentium F0 0F bugfix can have resulted in the mapped
9786 * IDT being write-protected.
9787 */
9788-static inline void set_intr_gate(unsigned int n, void *addr)
9789+static inline void set_intr_gate(unsigned int n, const void *addr)
9790 {
9791 BUG_ON((unsigned)n > 0xFF);
9792 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9793@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9794 /*
9795 * This routine sets up an interrupt gate at directory privilege level 3.
9796 */
9797-static inline void set_system_intr_gate(unsigned int n, void *addr)
9798+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9799 {
9800 BUG_ON((unsigned)n > 0xFF);
9801 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9802 }
9803
9804-static inline void set_system_trap_gate(unsigned int n, void *addr)
9805+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9806 {
9807 BUG_ON((unsigned)n > 0xFF);
9808 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9809 }
9810
9811-static inline void set_trap_gate(unsigned int n, void *addr)
9812+static inline void set_trap_gate(unsigned int n, const void *addr)
9813 {
9814 BUG_ON((unsigned)n > 0xFF);
9815 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9816@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9817 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9818 {
9819 BUG_ON((unsigned)n > 0xFF);
9820- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9821+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9822 }
9823
9824-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9825+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9826 {
9827 BUG_ON((unsigned)n > 0xFF);
9828 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9829 }
9830
9831-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9832+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9833 {
9834 BUG_ON((unsigned)n > 0xFF);
9835 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9836 }
9837
9838+#ifdef CONFIG_X86_32
9839+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9840+{
9841+ struct desc_struct d;
9842+
9843+ if (likely(limit))
9844+ limit = (limit - 1UL) >> PAGE_SHIFT;
9845+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9846+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9847+}
9848+#endif
9849+
9850 #endif /* _ASM_X86_DESC_H */
9851diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9852index 9d66848..6b4a691 100644
9853--- a/arch/x86/include/asm/desc_defs.h
9854+++ b/arch/x86/include/asm/desc_defs.h
9855@@ -31,6 +31,12 @@ struct desc_struct {
9856 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9857 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9858 };
9859+ struct {
9860+ u16 offset_low;
9861+ u16 seg;
9862+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9863+ unsigned offset_high: 16;
9864+ } gate;
9865 };
9866 } __attribute__((packed));
9867
9868diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9869index cee34e9..a7c3fa2 100644
9870--- a/arch/x86/include/asm/device.h
9871+++ b/arch/x86/include/asm/device.h
9872@@ -6,7 +6,7 @@ struct dev_archdata {
9873 void *acpi_handle;
9874 #endif
9875 #ifdef CONFIG_X86_64
9876-struct dma_map_ops *dma_ops;
9877+ const struct dma_map_ops *dma_ops;
9878 #endif
9879 #ifdef CONFIG_DMAR
9880 void *iommu; /* hook for IOMMU specific extension */
9881diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9882index 6a25d5d..786b202 100644
9883--- a/arch/x86/include/asm/dma-mapping.h
9884+++ b/arch/x86/include/asm/dma-mapping.h
9885@@ -25,9 +25,9 @@ extern int iommu_merge;
9886 extern struct device x86_dma_fallback_dev;
9887 extern int panic_on_overflow;
9888
9889-extern struct dma_map_ops *dma_ops;
9890+extern const struct dma_map_ops *dma_ops;
9891
9892-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9893+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9894 {
9895 #ifdef CONFIG_X86_32
9896 return dma_ops;
9897@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9898 /* Make sure we keep the same behaviour */
9899 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9900 {
9901- struct dma_map_ops *ops = get_dma_ops(dev);
9902+ const struct dma_map_ops *ops = get_dma_ops(dev);
9903 if (ops->mapping_error)
9904 return ops->mapping_error(dev, dma_addr);
9905
9906@@ -122,7 +122,7 @@ static inline void *
9907 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9908 gfp_t gfp)
9909 {
9910- struct dma_map_ops *ops = get_dma_ops(dev);
9911+ const struct dma_map_ops *ops = get_dma_ops(dev);
9912 void *memory;
9913
9914 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9915@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9916 static inline void dma_free_coherent(struct device *dev, size_t size,
9917 void *vaddr, dma_addr_t bus)
9918 {
9919- struct dma_map_ops *ops = get_dma_ops(dev);
9920+ const struct dma_map_ops *ops = get_dma_ops(dev);
9921
9922 WARN_ON(irqs_disabled()); /* for portability */
9923
9924diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9925index 40b4e61..40d8133 100644
9926--- a/arch/x86/include/asm/e820.h
9927+++ b/arch/x86/include/asm/e820.h
9928@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9929 #define ISA_END_ADDRESS 0x100000
9930 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9931
9932-#define BIOS_BEGIN 0x000a0000
9933+#define BIOS_BEGIN 0x000c0000
9934 #define BIOS_END 0x00100000
9935
9936 #ifdef __KERNEL__
9937diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9938index 8ac9d9a..0a6c96e 100644
9939--- a/arch/x86/include/asm/elf.h
9940+++ b/arch/x86/include/asm/elf.h
9941@@ -257,7 +257,25 @@ extern int force_personality32;
9942 the loader. We need to make sure that it is out of the way of the program
9943 that it will "exec", and that there is sufficient room for the brk. */
9944
9945+#ifdef CONFIG_PAX_SEGMEXEC
9946+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9947+#else
9948 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9949+#endif
9950+
9951+#ifdef CONFIG_PAX_ASLR
9952+#ifdef CONFIG_X86_32
9953+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9954+
9955+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9956+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9957+#else
9958+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9959+
9960+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9961+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9962+#endif
9963+#endif
9964
9965 /* This yields a mask that user programs can use to figure out what
9966 instruction set this CPU supports. This could be done in user space,
9967@@ -310,9 +328,7 @@ do { \
9968
9969 #define ARCH_DLINFO \
9970 do { \
9971- if (vdso_enabled) \
9972- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9973- (unsigned long)current->mm->context.vdso); \
9974+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9975 } while (0)
9976
9977 #define AT_SYSINFO 32
9978@@ -323,7 +339,7 @@ do { \
9979
9980 #endif /* !CONFIG_X86_32 */
9981
9982-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9983+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9984
9985 #define VDSO_ENTRY \
9986 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9987@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9988 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9989 #define compat_arch_setup_additional_pages syscall32_setup_pages
9990
9991-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9992-#define arch_randomize_brk arch_randomize_brk
9993-
9994 #endif /* _ASM_X86_ELF_H */
9995diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
9996index cc70c1c..d96d011 100644
9997--- a/arch/x86/include/asm/emergency-restart.h
9998+++ b/arch/x86/include/asm/emergency-restart.h
9999@@ -15,6 +15,6 @@ enum reboot_type {
10000
10001 extern enum reboot_type reboot_type;
10002
10003-extern void machine_emergency_restart(void);
10004+extern void machine_emergency_restart(void) __noreturn;
10005
10006 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10007diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10008index 1f11ce4..7caabd1 100644
10009--- a/arch/x86/include/asm/futex.h
10010+++ b/arch/x86/include/asm/futex.h
10011@@ -12,16 +12,18 @@
10012 #include <asm/system.h>
10013
10014 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10015+ typecheck(u32 __user *, uaddr); \
10016 asm volatile("1:\t" insn "\n" \
10017 "2:\t.section .fixup,\"ax\"\n" \
10018 "3:\tmov\t%3, %1\n" \
10019 "\tjmp\t2b\n" \
10020 "\t.previous\n" \
10021 _ASM_EXTABLE(1b, 3b) \
10022- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10023+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10024 : "i" (-EFAULT), "0" (oparg), "1" (0))
10025
10026 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10027+ typecheck(u32 __user *, uaddr); \
10028 asm volatile("1:\tmovl %2, %0\n" \
10029 "\tmovl\t%0, %3\n" \
10030 "\t" insn "\n" \
10031@@ -34,10 +36,10 @@
10032 _ASM_EXTABLE(1b, 4b) \
10033 _ASM_EXTABLE(2b, 4b) \
10034 : "=&a" (oldval), "=&r" (ret), \
10035- "+m" (*uaddr), "=&r" (tem) \
10036+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10037 : "r" (oparg), "i" (-EFAULT), "1" (0))
10038
10039-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10040+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10041 {
10042 int op = (encoded_op >> 28) & 7;
10043 int cmp = (encoded_op >> 24) & 15;
10044@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10045
10046 switch (op) {
10047 case FUTEX_OP_SET:
10048- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10049+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10050 break;
10051 case FUTEX_OP_ADD:
10052- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10053+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10054 uaddr, oparg);
10055 break;
10056 case FUTEX_OP_OR:
10057@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10058 return ret;
10059 }
10060
10061-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10062+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10063 int newval)
10064 {
10065
10066@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10067 return -ENOSYS;
10068 #endif
10069
10070- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10071+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10072 return -EFAULT;
10073
10074- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10075+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10076 "2:\t.section .fixup, \"ax\"\n"
10077 "3:\tmov %2, %0\n"
10078 "\tjmp 2b\n"
10079 "\t.previous\n"
10080 _ASM_EXTABLE(1b, 3b)
10081- : "=a" (oldval), "+m" (*uaddr)
10082+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10083 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10084 : "memory"
10085 );
10086diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10087index ba180d9..3bad351 100644
10088--- a/arch/x86/include/asm/hw_irq.h
10089+++ b/arch/x86/include/asm/hw_irq.h
10090@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10091 extern void enable_IO_APIC(void);
10092
10093 /* Statistics */
10094-extern atomic_t irq_err_count;
10095-extern atomic_t irq_mis_count;
10096+extern atomic_unchecked_t irq_err_count;
10097+extern atomic_unchecked_t irq_mis_count;
10098
10099 /* EISA */
10100 extern void eisa_set_level_irq(unsigned int irq);
10101diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10102index 0b20bbb..4cb1396 100644
10103--- a/arch/x86/include/asm/i387.h
10104+++ b/arch/x86/include/asm/i387.h
10105@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10106 {
10107 int err;
10108
10109+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10110+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10111+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10112+#endif
10113+
10114 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10115 "2:\n"
10116 ".section .fixup,\"ax\"\n"
10117@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10118 {
10119 int err;
10120
10121+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10122+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10123+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10124+#endif
10125+
10126 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10127 "2:\n"
10128 ".section .fixup,\"ax\"\n"
10129@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10130 }
10131
10132 /* We need a safe address that is cheap to find and that is already
10133- in L1 during context switch. The best choices are unfortunately
10134- different for UP and SMP */
10135-#ifdef CONFIG_SMP
10136-#define safe_address (__per_cpu_offset[0])
10137-#else
10138-#define safe_address (kstat_cpu(0).cpustat.user)
10139-#endif
10140+ in L1 during context switch. */
10141+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10142
10143 /*
10144 * These must be called with preempt disabled
10145@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10146 struct thread_info *me = current_thread_info();
10147 preempt_disable();
10148 if (me->status & TS_USEDFPU)
10149- __save_init_fpu(me->task);
10150+ __save_init_fpu(current);
10151 else
10152 clts();
10153 }
10154diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10155index a299900..15c5410 100644
10156--- a/arch/x86/include/asm/io_32.h
10157+++ b/arch/x86/include/asm/io_32.h
10158@@ -3,6 +3,7 @@
10159
10160 #include <linux/string.h>
10161 #include <linux/compiler.h>
10162+#include <asm/processor.h>
10163
10164 /*
10165 * This file contains the definitions for the x86 IO instructions
10166@@ -42,6 +43,17 @@
10167
10168 #ifdef __KERNEL__
10169
10170+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10171+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10172+{
10173+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10174+}
10175+
10176+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10177+{
10178+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10179+}
10180+
10181 #include <asm-generic/iomap.h>
10182
10183 #include <linux/vmalloc.h>
10184diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10185index 2440678..c158b88 100644
10186--- a/arch/x86/include/asm/io_64.h
10187+++ b/arch/x86/include/asm/io_64.h
10188@@ -140,6 +140,17 @@ __OUTS(l)
10189
10190 #include <linux/vmalloc.h>
10191
10192+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10193+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10194+{
10195+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10196+}
10197+
10198+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10199+{
10200+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10201+}
10202+
10203 #include <asm-generic/iomap.h>
10204
10205 void __memcpy_fromio(void *, unsigned long, unsigned);
10206diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10207index fd6d21b..8b13915 100644
10208--- a/arch/x86/include/asm/iommu.h
10209+++ b/arch/x86/include/asm/iommu.h
10210@@ -3,7 +3,7 @@
10211
10212 extern void pci_iommu_shutdown(void);
10213 extern void no_iommu_init(void);
10214-extern struct dma_map_ops nommu_dma_ops;
10215+extern const struct dma_map_ops nommu_dma_ops;
10216 extern int force_iommu, no_iommu;
10217 extern int iommu_detected;
10218 extern int iommu_pass_through;
10219diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10220index 9e2b952..557206e 100644
10221--- a/arch/x86/include/asm/irqflags.h
10222+++ b/arch/x86/include/asm/irqflags.h
10223@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10224 sti; \
10225 sysexit
10226
10227+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10228+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10229+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10230+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10231+
10232 #else
10233 #define INTERRUPT_RETURN iret
10234 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10235diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10236index 4fe681d..bb6d40c 100644
10237--- a/arch/x86/include/asm/kprobes.h
10238+++ b/arch/x86/include/asm/kprobes.h
10239@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10240 #define BREAKPOINT_INSTRUCTION 0xcc
10241 #define RELATIVEJUMP_INSTRUCTION 0xe9
10242 #define MAX_INSN_SIZE 16
10243-#define MAX_STACK_SIZE 64
10244-#define MIN_STACK_SIZE(ADDR) \
10245- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10246- THREAD_SIZE - (unsigned long)(ADDR))) \
10247- ? (MAX_STACK_SIZE) \
10248- : (((unsigned long)current_thread_info()) + \
10249- THREAD_SIZE - (unsigned long)(ADDR)))
10250+#define MAX_STACK_SIZE 64UL
10251+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10252
10253 #define flush_insn_slot(p) do { } while (0)
10254
10255diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10256index 08bc2ff..2e88d1f 100644
10257--- a/arch/x86/include/asm/kvm_host.h
10258+++ b/arch/x86/include/asm/kvm_host.h
10259@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10260 bool (*gb_page_enable)(void);
10261
10262 const struct trace_print_flags *exit_reasons_str;
10263-};
10264+} __do_const;
10265
10266-extern struct kvm_x86_ops *kvm_x86_ops;
10267+extern const struct kvm_x86_ops *kvm_x86_ops;
10268
10269 int kvm_mmu_module_init(void);
10270 void kvm_mmu_module_exit(void);
10271diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10272index 47b9b6f..815aaa1 100644
10273--- a/arch/x86/include/asm/local.h
10274+++ b/arch/x86/include/asm/local.h
10275@@ -18,26 +18,58 @@ typedef struct {
10276
10277 static inline void local_inc(local_t *l)
10278 {
10279- asm volatile(_ASM_INC "%0"
10280+ asm volatile(_ASM_INC "%0\n"
10281+
10282+#ifdef CONFIG_PAX_REFCOUNT
10283+ "jno 0f\n"
10284+ _ASM_DEC "%0\n"
10285+ "int $4\n0:\n"
10286+ _ASM_EXTABLE(0b, 0b)
10287+#endif
10288+
10289 : "+m" (l->a.counter));
10290 }
10291
10292 static inline void local_dec(local_t *l)
10293 {
10294- asm volatile(_ASM_DEC "%0"
10295+ asm volatile(_ASM_DEC "%0\n"
10296+
10297+#ifdef CONFIG_PAX_REFCOUNT
10298+ "jno 0f\n"
10299+ _ASM_INC "%0\n"
10300+ "int $4\n0:\n"
10301+ _ASM_EXTABLE(0b, 0b)
10302+#endif
10303+
10304 : "+m" (l->a.counter));
10305 }
10306
10307 static inline void local_add(long i, local_t *l)
10308 {
10309- asm volatile(_ASM_ADD "%1,%0"
10310+ asm volatile(_ASM_ADD "%1,%0\n"
10311+
10312+#ifdef CONFIG_PAX_REFCOUNT
10313+ "jno 0f\n"
10314+ _ASM_SUB "%1,%0\n"
10315+ "int $4\n0:\n"
10316+ _ASM_EXTABLE(0b, 0b)
10317+#endif
10318+
10319 : "+m" (l->a.counter)
10320 : "ir" (i));
10321 }
10322
10323 static inline void local_sub(long i, local_t *l)
10324 {
10325- asm volatile(_ASM_SUB "%1,%0"
10326+ asm volatile(_ASM_SUB "%1,%0\n"
10327+
10328+#ifdef CONFIG_PAX_REFCOUNT
10329+ "jno 0f\n"
10330+ _ASM_ADD "%1,%0\n"
10331+ "int $4\n0:\n"
10332+ _ASM_EXTABLE(0b, 0b)
10333+#endif
10334+
10335 : "+m" (l->a.counter)
10336 : "ir" (i));
10337 }
10338@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10339 {
10340 unsigned char c;
10341
10342- asm volatile(_ASM_SUB "%2,%0; sete %1"
10343+ asm volatile(_ASM_SUB "%2,%0\n"
10344+
10345+#ifdef CONFIG_PAX_REFCOUNT
10346+ "jno 0f\n"
10347+ _ASM_ADD "%2,%0\n"
10348+ "int $4\n0:\n"
10349+ _ASM_EXTABLE(0b, 0b)
10350+#endif
10351+
10352+ "sete %1\n"
10353 : "+m" (l->a.counter), "=qm" (c)
10354 : "ir" (i) : "memory");
10355 return c;
10356@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10357 {
10358 unsigned char c;
10359
10360- asm volatile(_ASM_DEC "%0; sete %1"
10361+ asm volatile(_ASM_DEC "%0\n"
10362+
10363+#ifdef CONFIG_PAX_REFCOUNT
10364+ "jno 0f\n"
10365+ _ASM_INC "%0\n"
10366+ "int $4\n0:\n"
10367+ _ASM_EXTABLE(0b, 0b)
10368+#endif
10369+
10370+ "sete %1\n"
10371 : "+m" (l->a.counter), "=qm" (c)
10372 : : "memory");
10373 return c != 0;
10374@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10375 {
10376 unsigned char c;
10377
10378- asm volatile(_ASM_INC "%0; sete %1"
10379+ asm volatile(_ASM_INC "%0\n"
10380+
10381+#ifdef CONFIG_PAX_REFCOUNT
10382+ "jno 0f\n"
10383+ _ASM_DEC "%0\n"
10384+ "int $4\n0:\n"
10385+ _ASM_EXTABLE(0b, 0b)
10386+#endif
10387+
10388+ "sete %1\n"
10389 : "+m" (l->a.counter), "=qm" (c)
10390 : : "memory");
10391 return c != 0;
10392@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10393 {
10394 unsigned char c;
10395
10396- asm volatile(_ASM_ADD "%2,%0; sets %1"
10397+ asm volatile(_ASM_ADD "%2,%0\n"
10398+
10399+#ifdef CONFIG_PAX_REFCOUNT
10400+ "jno 0f\n"
10401+ _ASM_SUB "%2,%0\n"
10402+ "int $4\n0:\n"
10403+ _ASM_EXTABLE(0b, 0b)
10404+#endif
10405+
10406+ "sets %1\n"
10407 : "+m" (l->a.counter), "=qm" (c)
10408 : "ir" (i) : "memory");
10409 return c;
10410@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10411 #endif
10412 /* Modern 486+ processor */
10413 __i = i;
10414- asm volatile(_ASM_XADD "%0, %1;"
10415+ asm volatile(_ASM_XADD "%0, %1\n"
10416+
10417+#ifdef CONFIG_PAX_REFCOUNT
10418+ "jno 0f\n"
10419+ _ASM_MOV "%0,%1\n"
10420+ "int $4\n0:\n"
10421+ _ASM_EXTABLE(0b, 0b)
10422+#endif
10423+
10424 : "+r" (i), "+m" (l->a.counter)
10425 : : "memory");
10426 return i + __i;
10427diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10428index ef51b50..514ba37 100644
10429--- a/arch/x86/include/asm/microcode.h
10430+++ b/arch/x86/include/asm/microcode.h
10431@@ -12,13 +12,13 @@ struct device;
10432 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10433
10434 struct microcode_ops {
10435- enum ucode_state (*request_microcode_user) (int cpu,
10436+ enum ucode_state (* const request_microcode_user) (int cpu,
10437 const void __user *buf, size_t size);
10438
10439- enum ucode_state (*request_microcode_fw) (int cpu,
10440+ enum ucode_state (* const request_microcode_fw) (int cpu,
10441 struct device *device);
10442
10443- void (*microcode_fini_cpu) (int cpu);
10444+ void (* const microcode_fini_cpu) (int cpu);
10445
10446 /*
10447 * The generic 'microcode_core' part guarantees that
10448@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10449 extern struct ucode_cpu_info ucode_cpu_info[];
10450
10451 #ifdef CONFIG_MICROCODE_INTEL
10452-extern struct microcode_ops * __init init_intel_microcode(void);
10453+extern const struct microcode_ops * __init init_intel_microcode(void);
10454 #else
10455-static inline struct microcode_ops * __init init_intel_microcode(void)
10456+static inline const struct microcode_ops * __init init_intel_microcode(void)
10457 {
10458 return NULL;
10459 }
10460 #endif /* CONFIG_MICROCODE_INTEL */
10461
10462 #ifdef CONFIG_MICROCODE_AMD
10463-extern struct microcode_ops * __init init_amd_microcode(void);
10464+extern const struct microcode_ops * __init init_amd_microcode(void);
10465 #else
10466-static inline struct microcode_ops * __init init_amd_microcode(void)
10467+static inline const struct microcode_ops * __init init_amd_microcode(void)
10468 {
10469 return NULL;
10470 }
10471diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10472index 593e51d..fa69c9a 100644
10473--- a/arch/x86/include/asm/mman.h
10474+++ b/arch/x86/include/asm/mman.h
10475@@ -5,4 +5,14 @@
10476
10477 #include <asm-generic/mman.h>
10478
10479+#ifdef __KERNEL__
10480+#ifndef __ASSEMBLY__
10481+#ifdef CONFIG_X86_32
10482+#define arch_mmap_check i386_mmap_check
10483+int i386_mmap_check(unsigned long addr, unsigned long len,
10484+ unsigned long flags);
10485+#endif
10486+#endif
10487+#endif
10488+
10489 #endif /* _ASM_X86_MMAN_H */
10490diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10491index 80a1dee..239c67d 100644
10492--- a/arch/x86/include/asm/mmu.h
10493+++ b/arch/x86/include/asm/mmu.h
10494@@ -9,10 +9,23 @@
10495 * we put the segment information here.
10496 */
10497 typedef struct {
10498- void *ldt;
10499+ struct desc_struct *ldt;
10500 int size;
10501 struct mutex lock;
10502- void *vdso;
10503+ unsigned long vdso;
10504+
10505+#ifdef CONFIG_X86_32
10506+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10507+ unsigned long user_cs_base;
10508+ unsigned long user_cs_limit;
10509+
10510+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10511+ cpumask_t cpu_user_cs_mask;
10512+#endif
10513+
10514+#endif
10515+#endif
10516+
10517 } mm_context_t;
10518
10519 #ifdef CONFIG_SMP
10520diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10521index 8b5393e..8143173 100644
10522--- a/arch/x86/include/asm/mmu_context.h
10523+++ b/arch/x86/include/asm/mmu_context.h
10524@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10525
10526 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10527 {
10528+
10529+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10530+ unsigned int i;
10531+ pgd_t *pgd;
10532+
10533+ pax_open_kernel();
10534+ pgd = get_cpu_pgd(smp_processor_id());
10535+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10536+ set_pgd_batched(pgd+i, native_make_pgd(0));
10537+ pax_close_kernel();
10538+#endif
10539+
10540 #ifdef CONFIG_SMP
10541 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10542 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10543@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10544 struct task_struct *tsk)
10545 {
10546 unsigned cpu = smp_processor_id();
10547+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10548+ int tlbstate = TLBSTATE_OK;
10549+#endif
10550
10551 if (likely(prev != next)) {
10552 #ifdef CONFIG_SMP
10553+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10554+ tlbstate = percpu_read(cpu_tlbstate.state);
10555+#endif
10556 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10557 percpu_write(cpu_tlbstate.active_mm, next);
10558 #endif
10559 cpumask_set_cpu(cpu, mm_cpumask(next));
10560
10561 /* Re-load page tables */
10562+#ifdef CONFIG_PAX_PER_CPU_PGD
10563+ pax_open_kernel();
10564+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10565+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10566+ pax_close_kernel();
10567+ load_cr3(get_cpu_pgd(cpu));
10568+#else
10569 load_cr3(next->pgd);
10570+#endif
10571
10572 /* stop flush ipis for the previous mm */
10573 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10574@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10575 */
10576 if (unlikely(prev->context.ldt != next->context.ldt))
10577 load_LDT_nolock(&next->context);
10578- }
10579+
10580+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10581+ if (!nx_enabled) {
10582+ smp_mb__before_clear_bit();
10583+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10584+ smp_mb__after_clear_bit();
10585+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10586+ }
10587+#endif
10588+
10589+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10590+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10591+ prev->context.user_cs_limit != next->context.user_cs_limit))
10592+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10593 #ifdef CONFIG_SMP
10594+ else if (unlikely(tlbstate != TLBSTATE_OK))
10595+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10596+#endif
10597+#endif
10598+
10599+ }
10600 else {
10601+
10602+#ifdef CONFIG_PAX_PER_CPU_PGD
10603+ pax_open_kernel();
10604+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10605+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10606+ pax_close_kernel();
10607+ load_cr3(get_cpu_pgd(cpu));
10608+#endif
10609+
10610+#ifdef CONFIG_SMP
10611 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10612 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10613
10614@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10615 * tlb flush IPI delivery. We must reload CR3
10616 * to make sure to use no freed page tables.
10617 */
10618+
10619+#ifndef CONFIG_PAX_PER_CPU_PGD
10620 load_cr3(next->pgd);
10621+#endif
10622+
10623 load_LDT_nolock(&next->context);
10624+
10625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10626+ if (!nx_enabled)
10627+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10628+#endif
10629+
10630+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10631+#ifdef CONFIG_PAX_PAGEEXEC
10632+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10633+#endif
10634+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10635+#endif
10636+
10637 }
10638- }
10639 #endif
10640+ }
10641 }
10642
10643 #define activate_mm(prev, next) \
10644diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10645index 3e2ce58..caaf478 100644
10646--- a/arch/x86/include/asm/module.h
10647+++ b/arch/x86/include/asm/module.h
10648@@ -5,6 +5,7 @@
10649
10650 #ifdef CONFIG_X86_64
10651 /* X86_64 does not define MODULE_PROC_FAMILY */
10652+#define MODULE_PROC_FAMILY ""
10653 #elif defined CONFIG_M386
10654 #define MODULE_PROC_FAMILY "386 "
10655 #elif defined CONFIG_M486
10656@@ -59,13 +60,26 @@
10657 #error unknown processor family
10658 #endif
10659
10660-#ifdef CONFIG_X86_32
10661-# ifdef CONFIG_4KSTACKS
10662-# define MODULE_STACKSIZE "4KSTACKS "
10663-# else
10664-# define MODULE_STACKSIZE ""
10665-# endif
10666-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10667+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10668+#define MODULE_STACKSIZE "4KSTACKS "
10669+#else
10670+#define MODULE_STACKSIZE ""
10671+#endif
10672+
10673+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10674+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10675+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10676+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10677+#else
10678+#define MODULE_PAX_KERNEXEC ""
10679 #endif
10680
10681+#ifdef CONFIG_PAX_MEMORY_UDEREF
10682+#define MODULE_PAX_UDEREF "UDEREF "
10683+#else
10684+#define MODULE_PAX_UDEREF ""
10685+#endif
10686+
10687+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10688+
10689 #endif /* _ASM_X86_MODULE_H */
10690diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10691index 7639dbf..e08a58c 100644
10692--- a/arch/x86/include/asm/page_64_types.h
10693+++ b/arch/x86/include/asm/page_64_types.h
10694@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10695
10696 /* duplicated to the one in bootmem.h */
10697 extern unsigned long max_pfn;
10698-extern unsigned long phys_base;
10699+extern const unsigned long phys_base;
10700
10701 extern unsigned long __phys_addr(unsigned long);
10702 #define __phys_reloc_hide(x) (x)
10703diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10704index efb3899..ef30687 100644
10705--- a/arch/x86/include/asm/paravirt.h
10706+++ b/arch/x86/include/asm/paravirt.h
10707@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10708 val);
10709 }
10710
10711+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10712+{
10713+ pgdval_t val = native_pgd_val(pgd);
10714+
10715+ if (sizeof(pgdval_t) > sizeof(long))
10716+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10717+ val, (u64)val >> 32);
10718+ else
10719+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10720+ val);
10721+}
10722+
10723 static inline void pgd_clear(pgd_t *pgdp)
10724 {
10725 set_pgd(pgdp, __pgd(0));
10726@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10727 pv_mmu_ops.set_fixmap(idx, phys, flags);
10728 }
10729
10730+#ifdef CONFIG_PAX_KERNEXEC
10731+static inline unsigned long pax_open_kernel(void)
10732+{
10733+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10734+}
10735+
10736+static inline unsigned long pax_close_kernel(void)
10737+{
10738+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10739+}
10740+#else
10741+static inline unsigned long pax_open_kernel(void) { return 0; }
10742+static inline unsigned long pax_close_kernel(void) { return 0; }
10743+#endif
10744+
10745 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10746
10747 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10748@@ -945,7 +972,7 @@ extern void default_banner(void);
10749
10750 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10751 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10752-#define PARA_INDIRECT(addr) *%cs:addr
10753+#define PARA_INDIRECT(addr) *%ss:addr
10754 #endif
10755
10756 #define INTERRUPT_RETURN \
10757@@ -1022,6 +1049,21 @@ extern void default_banner(void);
10758 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10759 CLBR_NONE, \
10760 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10761+
10762+#define GET_CR0_INTO_RDI \
10763+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10764+ mov %rax,%rdi
10765+
10766+#define SET_RDI_INTO_CR0 \
10767+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10768+
10769+#define GET_CR3_INTO_RDI \
10770+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10771+ mov %rax,%rdi
10772+
10773+#define SET_RDI_INTO_CR3 \
10774+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10775+
10776 #endif /* CONFIG_X86_32 */
10777
10778 #endif /* __ASSEMBLY__ */
10779diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10780index 9357473..aeb2de5 100644
10781--- a/arch/x86/include/asm/paravirt_types.h
10782+++ b/arch/x86/include/asm/paravirt_types.h
10783@@ -78,19 +78,19 @@ struct pv_init_ops {
10784 */
10785 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10786 unsigned long addr, unsigned len);
10787-};
10788+} __no_const;
10789
10790
10791 struct pv_lazy_ops {
10792 /* Set deferred update mode, used for batching operations. */
10793 void (*enter)(void);
10794 void (*leave)(void);
10795-};
10796+} __no_const;
10797
10798 struct pv_time_ops {
10799 unsigned long long (*sched_clock)(void);
10800 unsigned long (*get_tsc_khz)(void);
10801-};
10802+} __no_const;
10803
10804 struct pv_cpu_ops {
10805 /* hooks for various privileged instructions */
10806@@ -186,7 +186,7 @@ struct pv_cpu_ops {
10807
10808 void (*start_context_switch)(struct task_struct *prev);
10809 void (*end_context_switch)(struct task_struct *next);
10810-};
10811+} __no_const;
10812
10813 struct pv_irq_ops {
10814 /*
10815@@ -217,7 +217,7 @@ struct pv_apic_ops {
10816 unsigned long start_eip,
10817 unsigned long start_esp);
10818 #endif
10819-};
10820+} __no_const;
10821
10822 struct pv_mmu_ops {
10823 unsigned long (*read_cr2)(void);
10824@@ -301,6 +301,7 @@ struct pv_mmu_ops {
10825 struct paravirt_callee_save make_pud;
10826
10827 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10828+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10829 #endif /* PAGETABLE_LEVELS == 4 */
10830 #endif /* PAGETABLE_LEVELS >= 3 */
10831
10832@@ -316,6 +317,12 @@ struct pv_mmu_ops {
10833 an mfn. We can tell which is which from the index. */
10834 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10835 phys_addr_t phys, pgprot_t flags);
10836+
10837+#ifdef CONFIG_PAX_KERNEXEC
10838+ unsigned long (*pax_open_kernel)(void);
10839+ unsigned long (*pax_close_kernel)(void);
10840+#endif
10841+
10842 };
10843
10844 struct raw_spinlock;
10845@@ -326,7 +333,7 @@ struct pv_lock_ops {
10846 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10847 int (*spin_trylock)(struct raw_spinlock *lock);
10848 void (*spin_unlock)(struct raw_spinlock *lock);
10849-};
10850+} __no_const;
10851
10852 /* This contains all the paravirt structures: we get a convenient
10853 * number for each function using the offset which we use to indicate
10854diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10855index b399988..3f47c38 100644
10856--- a/arch/x86/include/asm/pci_x86.h
10857+++ b/arch/x86/include/asm/pci_x86.h
10858@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10859 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10860
10861 struct pci_raw_ops {
10862- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10863+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10864 int reg, int len, u32 *val);
10865- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10866+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10867 int reg, int len, u32 val);
10868 };
10869
10870-extern struct pci_raw_ops *raw_pci_ops;
10871-extern struct pci_raw_ops *raw_pci_ext_ops;
10872+extern const struct pci_raw_ops *raw_pci_ops;
10873+extern const struct pci_raw_ops *raw_pci_ext_ops;
10874
10875-extern struct pci_raw_ops pci_direct_conf1;
10876+extern const struct pci_raw_ops pci_direct_conf1;
10877 extern bool port_cf9_safe;
10878
10879 /* arch_initcall level */
10880diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10881index b65a36d..50345a4 100644
10882--- a/arch/x86/include/asm/percpu.h
10883+++ b/arch/x86/include/asm/percpu.h
10884@@ -78,6 +78,7 @@ do { \
10885 if (0) { \
10886 T__ tmp__; \
10887 tmp__ = (val); \
10888+ (void)tmp__; \
10889 } \
10890 switch (sizeof(var)) { \
10891 case 1: \
10892diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10893index 271de94..ef944d6 100644
10894--- a/arch/x86/include/asm/pgalloc.h
10895+++ b/arch/x86/include/asm/pgalloc.h
10896@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10897 pmd_t *pmd, pte_t *pte)
10898 {
10899 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10900+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10901+}
10902+
10903+static inline void pmd_populate_user(struct mm_struct *mm,
10904+ pmd_t *pmd, pte_t *pte)
10905+{
10906+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10907 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10908 }
10909
10910diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10911index 2334982..70bc412 100644
10912--- a/arch/x86/include/asm/pgtable-2level.h
10913+++ b/arch/x86/include/asm/pgtable-2level.h
10914@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10915
10916 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10917 {
10918+ pax_open_kernel();
10919 *pmdp = pmd;
10920+ pax_close_kernel();
10921 }
10922
10923 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10924diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10925index 33927d2..ccde329 100644
10926--- a/arch/x86/include/asm/pgtable-3level.h
10927+++ b/arch/x86/include/asm/pgtable-3level.h
10928@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10929
10930 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10931 {
10932+ pax_open_kernel();
10933 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10934+ pax_close_kernel();
10935 }
10936
10937 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10938 {
10939+ pax_open_kernel();
10940 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10941+ pax_close_kernel();
10942 }
10943
10944 /*
10945diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10946index af6fd36..867ff74 100644
10947--- a/arch/x86/include/asm/pgtable.h
10948+++ b/arch/x86/include/asm/pgtable.h
10949@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10950
10951 #ifndef __PAGETABLE_PUD_FOLDED
10952 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10953+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10954 #define pgd_clear(pgd) native_pgd_clear(pgd)
10955 #endif
10956
10957@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10958
10959 #define arch_end_context_switch(prev) do {} while(0)
10960
10961+#define pax_open_kernel() native_pax_open_kernel()
10962+#define pax_close_kernel() native_pax_close_kernel()
10963 #endif /* CONFIG_PARAVIRT */
10964
10965+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10966+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10967+
10968+#ifdef CONFIG_PAX_KERNEXEC
10969+static inline unsigned long native_pax_open_kernel(void)
10970+{
10971+ unsigned long cr0;
10972+
10973+ preempt_disable();
10974+ barrier();
10975+ cr0 = read_cr0() ^ X86_CR0_WP;
10976+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
10977+ write_cr0(cr0);
10978+ return cr0 ^ X86_CR0_WP;
10979+}
10980+
10981+static inline unsigned long native_pax_close_kernel(void)
10982+{
10983+ unsigned long cr0;
10984+
10985+ cr0 = read_cr0() ^ X86_CR0_WP;
10986+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10987+ write_cr0(cr0);
10988+ barrier();
10989+ preempt_enable_no_resched();
10990+ return cr0 ^ X86_CR0_WP;
10991+}
10992+#else
10993+static inline unsigned long native_pax_open_kernel(void) { return 0; }
10994+static inline unsigned long native_pax_close_kernel(void) { return 0; }
10995+#endif
10996+
10997 /*
10998 * The following only work if pte_present() is true.
10999 * Undefined behaviour if not..
11000 */
11001+static inline int pte_user(pte_t pte)
11002+{
11003+ return pte_val(pte) & _PAGE_USER;
11004+}
11005+
11006 static inline int pte_dirty(pte_t pte)
11007 {
11008 return pte_flags(pte) & _PAGE_DIRTY;
11009@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11010 return pte_clear_flags(pte, _PAGE_RW);
11011 }
11012
11013+static inline pte_t pte_mkread(pte_t pte)
11014+{
11015+ return __pte(pte_val(pte) | _PAGE_USER);
11016+}
11017+
11018 static inline pte_t pte_mkexec(pte_t pte)
11019 {
11020- return pte_clear_flags(pte, _PAGE_NX);
11021+#ifdef CONFIG_X86_PAE
11022+ if (__supported_pte_mask & _PAGE_NX)
11023+ return pte_clear_flags(pte, _PAGE_NX);
11024+ else
11025+#endif
11026+ return pte_set_flags(pte, _PAGE_USER);
11027+}
11028+
11029+static inline pte_t pte_exprotect(pte_t pte)
11030+{
11031+#ifdef CONFIG_X86_PAE
11032+ if (__supported_pte_mask & _PAGE_NX)
11033+ return pte_set_flags(pte, _PAGE_NX);
11034+ else
11035+#endif
11036+ return pte_clear_flags(pte, _PAGE_USER);
11037 }
11038
11039 static inline pte_t pte_mkdirty(pte_t pte)
11040@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11041 #endif
11042
11043 #ifndef __ASSEMBLY__
11044+
11045+#ifdef CONFIG_PAX_PER_CPU_PGD
11046+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11047+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11048+{
11049+ return cpu_pgd[cpu];
11050+}
11051+#endif
11052+
11053 #include <linux/mm_types.h>
11054
11055 static inline int pte_none(pte_t pte)
11056@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11057
11058 static inline int pgd_bad(pgd_t pgd)
11059 {
11060- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11061+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11062 }
11063
11064 static inline int pgd_none(pgd_t pgd)
11065@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11066 * pgd_offset() returns a (pgd_t *)
11067 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11068 */
11069-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11070+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11071+
11072+#ifdef CONFIG_PAX_PER_CPU_PGD
11073+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11074+#endif
11075+
11076 /*
11077 * a shortcut which implies the use of the kernel's pgd, instead
11078 * of a process's
11079@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11080 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11081 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11082
11083+#ifdef CONFIG_X86_32
11084+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11085+#else
11086+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11087+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11088+
11089+#ifdef CONFIG_PAX_MEMORY_UDEREF
11090+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11091+#else
11092+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11093+#endif
11094+
11095+#endif
11096+
11097 #ifndef __ASSEMBLY__
11098
11099 extern int direct_gbpages;
11100@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11101 * dst and src can be on the same page, but the range must not overlap,
11102 * and must not cross a page boundary.
11103 */
11104-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11105+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11106 {
11107- memcpy(dst, src, count * sizeof(pgd_t));
11108+ pax_open_kernel();
11109+ while (count--)
11110+ *dst++ = *src++;
11111+ pax_close_kernel();
11112 }
11113
11114+#ifdef CONFIG_PAX_PER_CPU_PGD
11115+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11116+#endif
11117+
11118+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11119+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11120+#else
11121+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11122+#endif
11123
11124 #include <asm-generic/pgtable.h>
11125 #endif /* __ASSEMBLY__ */
11126diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11127index 750f1bf..971e839 100644
11128--- a/arch/x86/include/asm/pgtable_32.h
11129+++ b/arch/x86/include/asm/pgtable_32.h
11130@@ -26,9 +26,6 @@
11131 struct mm_struct;
11132 struct vm_area_struct;
11133
11134-extern pgd_t swapper_pg_dir[1024];
11135-extern pgd_t trampoline_pg_dir[1024];
11136-
11137 static inline void pgtable_cache_init(void) { }
11138 static inline void check_pgt_cache(void) { }
11139 void paging_init(void);
11140@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11141 # include <asm/pgtable-2level.h>
11142 #endif
11143
11144+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11145+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11146+#ifdef CONFIG_X86_PAE
11147+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11148+#endif
11149+
11150 #if defined(CONFIG_HIGHPTE)
11151 #define __KM_PTE \
11152 (in_nmi() ? KM_NMI_PTE : \
11153@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11154 /* Clear a kernel PTE and flush it from the TLB */
11155 #define kpte_clear_flush(ptep, vaddr) \
11156 do { \
11157+ pax_open_kernel(); \
11158 pte_clear(&init_mm, (vaddr), (ptep)); \
11159+ pax_close_kernel(); \
11160 __flush_tlb_one((vaddr)); \
11161 } while (0)
11162
11163@@ -85,6 +90,9 @@ do { \
11164
11165 #endif /* !__ASSEMBLY__ */
11166
11167+#define HAVE_ARCH_UNMAPPED_AREA
11168+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11169+
11170 /*
11171 * kern_addr_valid() is (1) for FLATMEM and (0) for
11172 * SPARSEMEM and DISCONTIGMEM
11173diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11174index 5e67c15..12d5c47 100644
11175--- a/arch/x86/include/asm/pgtable_32_types.h
11176+++ b/arch/x86/include/asm/pgtable_32_types.h
11177@@ -8,7 +8,7 @@
11178 */
11179 #ifdef CONFIG_X86_PAE
11180 # include <asm/pgtable-3level_types.h>
11181-# define PMD_SIZE (1UL << PMD_SHIFT)
11182+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11183 # define PMD_MASK (~(PMD_SIZE - 1))
11184 #else
11185 # include <asm/pgtable-2level_types.h>
11186@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11187 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11188 #endif
11189
11190+#ifdef CONFIG_PAX_KERNEXEC
11191+#ifndef __ASSEMBLY__
11192+extern unsigned char MODULES_EXEC_VADDR[];
11193+extern unsigned char MODULES_EXEC_END[];
11194+#endif
11195+#include <asm/boot.h>
11196+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11197+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11198+#else
11199+#define ktla_ktva(addr) (addr)
11200+#define ktva_ktla(addr) (addr)
11201+#endif
11202+
11203 #define MODULES_VADDR VMALLOC_START
11204 #define MODULES_END VMALLOC_END
11205 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11206diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11207index c57a301..312bdb4 100644
11208--- a/arch/x86/include/asm/pgtable_64.h
11209+++ b/arch/x86/include/asm/pgtable_64.h
11210@@ -16,10 +16,13 @@
11211
11212 extern pud_t level3_kernel_pgt[512];
11213 extern pud_t level3_ident_pgt[512];
11214+extern pud_t level3_vmalloc_pgt[512];
11215+extern pud_t level3_vmemmap_pgt[512];
11216+extern pud_t level2_vmemmap_pgt[512];
11217 extern pmd_t level2_kernel_pgt[512];
11218 extern pmd_t level2_fixmap_pgt[512];
11219-extern pmd_t level2_ident_pgt[512];
11220-extern pgd_t init_level4_pgt[];
11221+extern pmd_t level2_ident_pgt[512*2];
11222+extern pgd_t init_level4_pgt[512];
11223
11224 #define swapper_pg_dir init_level4_pgt
11225
11226@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11227
11228 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11229 {
11230+ pax_open_kernel();
11231 *pmdp = pmd;
11232+ pax_close_kernel();
11233 }
11234
11235 static inline void native_pmd_clear(pmd_t *pmd)
11236@@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_t *pud)
11237
11238 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11239 {
11240+ pax_open_kernel();
11241+ *pgdp = pgd;
11242+ pax_close_kernel();
11243+}
11244+
11245+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11246+{
11247 *pgdp = pgd;
11248 }
11249
11250diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11251index 766ea16..5b96cb3 100644
11252--- a/arch/x86/include/asm/pgtable_64_types.h
11253+++ b/arch/x86/include/asm/pgtable_64_types.h
11254@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11255 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11256 #define MODULES_END _AC(0xffffffffff000000, UL)
11257 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11258+#define MODULES_EXEC_VADDR MODULES_VADDR
11259+#define MODULES_EXEC_END MODULES_END
11260+
11261+#define ktla_ktva(addr) (addr)
11262+#define ktva_ktla(addr) (addr)
11263
11264 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11265diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11266index d1f4a76..2f46ba1 100644
11267--- a/arch/x86/include/asm/pgtable_types.h
11268+++ b/arch/x86/include/asm/pgtable_types.h
11269@@ -16,12 +16,11 @@
11270 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11271 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11272 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11273-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11274+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11275 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11276 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11277 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11278-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11279-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11280+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11281 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11282
11283 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11284@@ -39,7 +38,6 @@
11285 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11286 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11287 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11288-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11289 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11290 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11291 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11292@@ -55,8 +53,10 @@
11293
11294 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11295 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11296-#else
11297+#elif defined(CONFIG_KMEMCHECK)
11298 #define _PAGE_NX (_AT(pteval_t, 0))
11299+#else
11300+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11301 #endif
11302
11303 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11304@@ -93,6 +93,9 @@
11305 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11306 _PAGE_ACCESSED)
11307
11308+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11309+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11310+
11311 #define __PAGE_KERNEL_EXEC \
11312 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11313 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11314@@ -103,8 +106,8 @@
11315 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11316 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11317 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11318-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11319-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11320+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11321+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11322 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11323 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11324 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11325@@ -163,8 +166,8 @@
11326 * bits are combined, this will alow user to access the high address mapped
11327 * VDSO in the presence of CONFIG_COMPAT_VDSO
11328 */
11329-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11330-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11331+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11332+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11333 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11334 #endif
11335
11336@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11337 {
11338 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11339 }
11340+#endif
11341
11342+#if PAGETABLE_LEVELS == 3
11343+#include <asm-generic/pgtable-nopud.h>
11344+#endif
11345+
11346+#if PAGETABLE_LEVELS == 2
11347+#include <asm-generic/pgtable-nopmd.h>
11348+#endif
11349+
11350+#ifndef __ASSEMBLY__
11351 #if PAGETABLE_LEVELS > 3
11352 typedef struct { pudval_t pud; } pud_t;
11353
11354@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11355 return pud.pud;
11356 }
11357 #else
11358-#include <asm-generic/pgtable-nopud.h>
11359-
11360 static inline pudval_t native_pud_val(pud_t pud)
11361 {
11362 return native_pgd_val(pud.pgd);
11363@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11364 return pmd.pmd;
11365 }
11366 #else
11367-#include <asm-generic/pgtable-nopmd.h>
11368-
11369 static inline pmdval_t native_pmd_val(pmd_t pmd)
11370 {
11371 return native_pgd_val(pmd.pud.pgd);
11372@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11373
11374 extern pteval_t __supported_pte_mask;
11375 extern void set_nx(void);
11376+
11377+#ifdef CONFIG_X86_32
11378+#ifdef CONFIG_X86_PAE
11379 extern int nx_enabled;
11380+#else
11381+#define nx_enabled (0)
11382+#endif
11383+#else
11384+#define nx_enabled (1)
11385+#endif
11386
11387 #define pgprot_writecombine pgprot_writecombine
11388 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11389diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11390index fa04dea..5f823fc 100644
11391--- a/arch/x86/include/asm/processor.h
11392+++ b/arch/x86/include/asm/processor.h
11393@@ -272,7 +272,7 @@ struct tss_struct {
11394
11395 } ____cacheline_aligned;
11396
11397-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11398+extern struct tss_struct init_tss[NR_CPUS];
11399
11400 /*
11401 * Save the original ist values for checking stack pointers during debugging
11402@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11403 */
11404 #define TASK_SIZE PAGE_OFFSET
11405 #define TASK_SIZE_MAX TASK_SIZE
11406+
11407+#ifdef CONFIG_PAX_SEGMEXEC
11408+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11409+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11410+#else
11411 #define STACK_TOP TASK_SIZE
11412-#define STACK_TOP_MAX STACK_TOP
11413+#endif
11414+
11415+#define STACK_TOP_MAX TASK_SIZE
11416
11417 #define INIT_THREAD { \
11418- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11419+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11420 .vm86_info = NULL, \
11421 .sysenter_cs = __KERNEL_CS, \
11422 .io_bitmap_ptr = NULL, \
11423@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11424 */
11425 #define INIT_TSS { \
11426 .x86_tss = { \
11427- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11428+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11429 .ss0 = __KERNEL_DS, \
11430 .ss1 = __KERNEL_CS, \
11431 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11432@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11433 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11434
11435 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11436-#define KSTK_TOP(info) \
11437-({ \
11438- unsigned long *__ptr = (unsigned long *)(info); \
11439- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11440-})
11441+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11442
11443 /*
11444 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11445@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11446 #define task_pt_regs(task) \
11447 ({ \
11448 struct pt_regs *__regs__; \
11449- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11450+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11451 __regs__ - 1; \
11452 })
11453
11454@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11455 /*
11456 * User space process size. 47bits minus one guard page.
11457 */
11458-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11459+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11460
11461 /* This decides where the kernel will search for a free chunk of vm
11462 * space during mmap's.
11463 */
11464 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11465- 0xc0000000 : 0xFFFFe000)
11466+ 0xc0000000 : 0xFFFFf000)
11467
11468 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11469 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11470@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11471 #define STACK_TOP_MAX TASK_SIZE_MAX
11472
11473 #define INIT_THREAD { \
11474- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11475+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11476 }
11477
11478 #define INIT_TSS { \
11479- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11480+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11481 }
11482
11483 /*
11484@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11485 */
11486 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11487
11488+#ifdef CONFIG_PAX_SEGMEXEC
11489+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11490+#endif
11491+
11492 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11493
11494 /* Get/set a process' ability to use the timestamp counter instruction */
11495diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11496index 0f0d908..f2e3da2 100644
11497--- a/arch/x86/include/asm/ptrace.h
11498+++ b/arch/x86/include/asm/ptrace.h
11499@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11500 }
11501
11502 /*
11503- * user_mode_vm(regs) determines whether a register set came from user mode.
11504+ * user_mode(regs) determines whether a register set came from user mode.
11505 * This is true if V8086 mode was enabled OR if the register set was from
11506 * protected mode with RPL-3 CS value. This tricky test checks that with
11507 * one comparison. Many places in the kernel can bypass this full check
11508- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11509+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11510+ * be used.
11511 */
11512-static inline int user_mode(struct pt_regs *regs)
11513+static inline int user_mode_novm(struct pt_regs *regs)
11514 {
11515 #ifdef CONFIG_X86_32
11516 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11517 #else
11518- return !!(regs->cs & 3);
11519+ return !!(regs->cs & SEGMENT_RPL_MASK);
11520 #endif
11521 }
11522
11523-static inline int user_mode_vm(struct pt_regs *regs)
11524+static inline int user_mode(struct pt_regs *regs)
11525 {
11526 #ifdef CONFIG_X86_32
11527 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11528 USER_RPL;
11529 #else
11530- return user_mode(regs);
11531+ return user_mode_novm(regs);
11532 #endif
11533 }
11534
11535diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11536index 562d4fd..6e39df1 100644
11537--- a/arch/x86/include/asm/reboot.h
11538+++ b/arch/x86/include/asm/reboot.h
11539@@ -6,19 +6,19 @@
11540 struct pt_regs;
11541
11542 struct machine_ops {
11543- void (*restart)(char *cmd);
11544- void (*halt)(void);
11545- void (*power_off)(void);
11546+ void (* __noreturn restart)(char *cmd);
11547+ void (* __noreturn halt)(void);
11548+ void (* __noreturn power_off)(void);
11549 void (*shutdown)(void);
11550 void (*crash_shutdown)(struct pt_regs *);
11551- void (*emergency_restart)(void);
11552-};
11553+ void (* __noreturn emergency_restart)(void);
11554+} __no_const;
11555
11556 extern struct machine_ops machine_ops;
11557
11558 void native_machine_crash_shutdown(struct pt_regs *regs);
11559 void native_machine_shutdown(void);
11560-void machine_real_restart(const unsigned char *code, int length);
11561+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11562
11563 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11564 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11565diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11566index 606ede1..dbfff37 100644
11567--- a/arch/x86/include/asm/rwsem.h
11568+++ b/arch/x86/include/asm/rwsem.h
11569@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11570 {
11571 asm volatile("# beginning down_read\n\t"
11572 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11573+
11574+#ifdef CONFIG_PAX_REFCOUNT
11575+ "jno 0f\n"
11576+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11577+ "int $4\n0:\n"
11578+ _ASM_EXTABLE(0b, 0b)
11579+#endif
11580+
11581 /* adds 0x00000001, returns the old value */
11582 " jns 1f\n"
11583 " call call_rwsem_down_read_failed\n"
11584@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11585 "1:\n\t"
11586 " mov %1,%2\n\t"
11587 " add %3,%2\n\t"
11588+
11589+#ifdef CONFIG_PAX_REFCOUNT
11590+ "jno 0f\n"
11591+ "sub %3,%2\n"
11592+ "int $4\n0:\n"
11593+ _ASM_EXTABLE(0b, 0b)
11594+#endif
11595+
11596 " jle 2f\n\t"
11597 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11598 " jnz 1b\n\t"
11599@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11600 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11601 asm volatile("# beginning down_write\n\t"
11602 LOCK_PREFIX " xadd %1,(%2)\n\t"
11603+
11604+#ifdef CONFIG_PAX_REFCOUNT
11605+ "jno 0f\n"
11606+ "mov %1,(%2)\n"
11607+ "int $4\n0:\n"
11608+ _ASM_EXTABLE(0b, 0b)
11609+#endif
11610+
11611 /* subtract 0x0000ffff, returns the old value */
11612 " test %1,%1\n\t"
11613 /* was the count 0 before? */
11614@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11615 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11616 asm volatile("# beginning __up_read\n\t"
11617 LOCK_PREFIX " xadd %1,(%2)\n\t"
11618+
11619+#ifdef CONFIG_PAX_REFCOUNT
11620+ "jno 0f\n"
11621+ "mov %1,(%2)\n"
11622+ "int $4\n0:\n"
11623+ _ASM_EXTABLE(0b, 0b)
11624+#endif
11625+
11626 /* subtracts 1, returns the old value */
11627 " jns 1f\n\t"
11628 " call call_rwsem_wake\n"
11629@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11630 rwsem_count_t tmp;
11631 asm volatile("# beginning __up_write\n\t"
11632 LOCK_PREFIX " xadd %1,(%2)\n\t"
11633+
11634+#ifdef CONFIG_PAX_REFCOUNT
11635+ "jno 0f\n"
11636+ "mov %1,(%2)\n"
11637+ "int $4\n0:\n"
11638+ _ASM_EXTABLE(0b, 0b)
11639+#endif
11640+
11641 /* tries to transition
11642 0xffff0001 -> 0x00000000 */
11643 " jz 1f\n"
11644@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11645 {
11646 asm volatile("# beginning __downgrade_write\n\t"
11647 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11648+
11649+#ifdef CONFIG_PAX_REFCOUNT
11650+ "jno 0f\n"
11651+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11652+ "int $4\n0:\n"
11653+ _ASM_EXTABLE(0b, 0b)
11654+#endif
11655+
11656 /*
11657 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11658 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11659@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11660 static inline void rwsem_atomic_add(rwsem_count_t delta,
11661 struct rw_semaphore *sem)
11662 {
11663- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11664+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11665+
11666+#ifdef CONFIG_PAX_REFCOUNT
11667+ "jno 0f\n"
11668+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11669+ "int $4\n0:\n"
11670+ _ASM_EXTABLE(0b, 0b)
11671+#endif
11672+
11673 : "+m" (sem->count)
11674 : "er" (delta));
11675 }
11676@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11677 {
11678 rwsem_count_t tmp = delta;
11679
11680- asm volatile(LOCK_PREFIX "xadd %0,%1"
11681+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11682+
11683+#ifdef CONFIG_PAX_REFCOUNT
11684+ "jno 0f\n"
11685+ "mov %0,%1\n"
11686+ "int $4\n0:\n"
11687+ _ASM_EXTABLE(0b, 0b)
11688+#endif
11689+
11690 : "+r" (tmp), "+m" (sem->count)
11691 : : "memory");
11692
11693diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11694index 14e0ed8..7f7dd5e 100644
11695--- a/arch/x86/include/asm/segment.h
11696+++ b/arch/x86/include/asm/segment.h
11697@@ -62,10 +62,15 @@
11698 * 26 - ESPFIX small SS
11699 * 27 - per-cpu [ offset to per-cpu data area ]
11700 * 28 - stack_canary-20 [ for stack protector ]
11701- * 29 - unused
11702- * 30 - unused
11703+ * 29 - PCI BIOS CS
11704+ * 30 - PCI BIOS DS
11705 * 31 - TSS for double fault handler
11706 */
11707+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11708+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11709+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11710+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11711+
11712 #define GDT_ENTRY_TLS_MIN 6
11713 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11714
11715@@ -77,6 +82,8 @@
11716
11717 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11718
11719+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11720+
11721 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11722
11723 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11724@@ -88,7 +95,7 @@
11725 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11726 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11727
11728-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11729+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11730 #ifdef CONFIG_SMP
11731 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11732 #else
11733@@ -102,6 +109,12 @@
11734 #define __KERNEL_STACK_CANARY 0
11735 #endif
11736
11737+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11738+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11739+
11740+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11741+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11742+
11743 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11744
11745 /*
11746@@ -139,7 +152,7 @@
11747 */
11748
11749 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11750-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11751+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11752
11753
11754 #else
11755@@ -163,6 +176,8 @@
11756 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11757 #define __USER32_DS __USER_DS
11758
11759+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11760+
11761 #define GDT_ENTRY_TSS 8 /* needs two entries */
11762 #define GDT_ENTRY_LDT 10 /* needs two entries */
11763 #define GDT_ENTRY_TLS_MIN 12
11764@@ -183,6 +198,7 @@
11765 #endif
11766
11767 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11768+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11769 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11770 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11771 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11772diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11773index 4c2f63c..5685db2 100644
11774--- a/arch/x86/include/asm/smp.h
11775+++ b/arch/x86/include/asm/smp.h
11776@@ -24,7 +24,7 @@ extern unsigned int num_processors;
11777 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11778 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11779 DECLARE_PER_CPU(u16, cpu_llc_id);
11780-DECLARE_PER_CPU(int, cpu_number);
11781+DECLARE_PER_CPU(unsigned int, cpu_number);
11782
11783 static inline struct cpumask *cpu_sibling_mask(int cpu)
11784 {
11785@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11786 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11787
11788 /* Static state in head.S used to set up a CPU */
11789-extern struct {
11790- void *sp;
11791- unsigned short ss;
11792-} stack_start;
11793+extern unsigned long stack_start; /* Initial stack pointer address */
11794
11795 struct smp_ops {
11796 void (*smp_prepare_boot_cpu)(void);
11797@@ -60,7 +57,7 @@ struct smp_ops {
11798
11799 void (*send_call_func_ipi)(const struct cpumask *mask);
11800 void (*send_call_func_single_ipi)(int cpu);
11801-};
11802+} __no_const;
11803
11804 /* Globals due to paravirt */
11805 extern void set_cpu_sibling_map(int cpu);
11806@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11807 extern int safe_smp_processor_id(void);
11808
11809 #elif defined(CONFIG_X86_64_SMP)
11810-#define raw_smp_processor_id() (percpu_read(cpu_number))
11811-
11812-#define stack_smp_processor_id() \
11813-({ \
11814- struct thread_info *ti; \
11815- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11816- ti->cpu; \
11817-})
11818+#define raw_smp_processor_id() (percpu_read(cpu_number))
11819+#define stack_smp_processor_id() raw_smp_processor_id()
11820 #define safe_smp_processor_id() smp_processor_id()
11821
11822 #endif
11823diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11824index 4e77853..4359783 100644
11825--- a/arch/x86/include/asm/spinlock.h
11826+++ b/arch/x86/include/asm/spinlock.h
11827@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11828 static inline void __raw_read_lock(raw_rwlock_t *rw)
11829 {
11830 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11831+
11832+#ifdef CONFIG_PAX_REFCOUNT
11833+ "jno 0f\n"
11834+ LOCK_PREFIX " addl $1,(%0)\n"
11835+ "int $4\n0:\n"
11836+ _ASM_EXTABLE(0b, 0b)
11837+#endif
11838+
11839 "jns 1f\n"
11840 "call __read_lock_failed\n\t"
11841 "1:\n"
11842@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11843 static inline void __raw_write_lock(raw_rwlock_t *rw)
11844 {
11845 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11846+
11847+#ifdef CONFIG_PAX_REFCOUNT
11848+ "jno 0f\n"
11849+ LOCK_PREFIX " addl %1,(%0)\n"
11850+ "int $4\n0:\n"
11851+ _ASM_EXTABLE(0b, 0b)
11852+#endif
11853+
11854 "jz 1f\n"
11855 "call __write_lock_failed\n\t"
11856 "1:\n"
11857@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11858
11859 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11860 {
11861- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11862+ asm volatile(LOCK_PREFIX "incl %0\n"
11863+
11864+#ifdef CONFIG_PAX_REFCOUNT
11865+ "jno 0f\n"
11866+ LOCK_PREFIX "decl %0\n"
11867+ "int $4\n0:\n"
11868+ _ASM_EXTABLE(0b, 0b)
11869+#endif
11870+
11871+ :"+m" (rw->lock) : : "memory");
11872 }
11873
11874 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11875 {
11876- asm volatile(LOCK_PREFIX "addl %1, %0"
11877+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
11878+
11879+#ifdef CONFIG_PAX_REFCOUNT
11880+ "jno 0f\n"
11881+ LOCK_PREFIX "subl %1, %0\n"
11882+ "int $4\n0:\n"
11883+ _ASM_EXTABLE(0b, 0b)
11884+#endif
11885+
11886 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11887 }
11888
11889diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11890index 1575177..cb23f52 100644
11891--- a/arch/x86/include/asm/stackprotector.h
11892+++ b/arch/x86/include/asm/stackprotector.h
11893@@ -48,7 +48,7 @@
11894 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11895 */
11896 #define GDT_STACK_CANARY_INIT \
11897- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11898+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11899
11900 /*
11901 * Initialize the stackprotector canary value.
11902@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11903
11904 static inline void load_stack_canary_segment(void)
11905 {
11906-#ifdef CONFIG_X86_32
11907+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11908 asm volatile ("mov %0, %%gs" : : "r" (0));
11909 #endif
11910 }
11911diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11912index e0fbf29..858ef4a 100644
11913--- a/arch/x86/include/asm/system.h
11914+++ b/arch/x86/include/asm/system.h
11915@@ -132,7 +132,7 @@ do { \
11916 "thread_return:\n\t" \
11917 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11918 __switch_canary \
11919- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11920+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11921 "movq %%rax,%%rdi\n\t" \
11922 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11923 "jnz ret_from_fork\n\t" \
11924@@ -143,7 +143,7 @@ do { \
11925 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11926 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11927 [_tif_fork] "i" (_TIF_FORK), \
11928- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11929+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
11930 [current_task] "m" (per_cpu_var(current_task)) \
11931 __switch_canary_iparam \
11932 : "memory", "cc" __EXTRA_CLOBBER)
11933@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11934 {
11935 unsigned long __limit;
11936 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11937- return __limit + 1;
11938+ return __limit;
11939 }
11940
11941 static inline void native_clts(void)
11942@@ -340,12 +340,12 @@ void enable_hlt(void);
11943
11944 void cpu_idle_wait(void);
11945
11946-extern unsigned long arch_align_stack(unsigned long sp);
11947+#define arch_align_stack(x) ((x) & ~0xfUL)
11948 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11949
11950 void default_idle(void);
11951
11952-void stop_this_cpu(void *dummy);
11953+void stop_this_cpu(void *dummy) __noreturn;
11954
11955 /*
11956 * Force strict CPU ordering.
11957diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11958index 19c3ce4..8962535 100644
11959--- a/arch/x86/include/asm/thread_info.h
11960+++ b/arch/x86/include/asm/thread_info.h
11961@@ -10,6 +10,7 @@
11962 #include <linux/compiler.h>
11963 #include <asm/page.h>
11964 #include <asm/types.h>
11965+#include <asm/percpu.h>
11966
11967 /*
11968 * low level task data that entry.S needs immediate access to
11969@@ -24,7 +25,6 @@ struct exec_domain;
11970 #include <asm/atomic.h>
11971
11972 struct thread_info {
11973- struct task_struct *task; /* main task structure */
11974 struct exec_domain *exec_domain; /* execution domain */
11975 __u32 flags; /* low level flags */
11976 __u32 status; /* thread synchronous flags */
11977@@ -34,18 +34,12 @@ struct thread_info {
11978 mm_segment_t addr_limit;
11979 struct restart_block restart_block;
11980 void __user *sysenter_return;
11981-#ifdef CONFIG_X86_32
11982- unsigned long previous_esp; /* ESP of the previous stack in
11983- case of nested (IRQ) stacks
11984- */
11985- __u8 supervisor_stack[0];
11986-#endif
11987+ unsigned long lowest_stack;
11988 int uaccess_err;
11989 };
11990
11991-#define INIT_THREAD_INFO(tsk) \
11992+#define INIT_THREAD_INFO \
11993 { \
11994- .task = &tsk, \
11995 .exec_domain = &default_exec_domain, \
11996 .flags = 0, \
11997 .cpu = 0, \
11998@@ -56,7 +50,7 @@ struct thread_info {
11999 }, \
12000 }
12001
12002-#define init_thread_info (init_thread_union.thread_info)
12003+#define init_thread_info (init_thread_union.stack)
12004 #define init_stack (init_thread_union.stack)
12005
12006 #else /* !__ASSEMBLY__ */
12007@@ -163,6 +157,23 @@ struct thread_info {
12008 #define alloc_thread_info(tsk) \
12009 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12010
12011+#ifdef __ASSEMBLY__
12012+/* how to get the thread information struct from ASM */
12013+#define GET_THREAD_INFO(reg) \
12014+ mov PER_CPU_VAR(current_tinfo), reg
12015+
12016+/* use this one if reg already contains %esp */
12017+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12018+#else
12019+/* how to get the thread information struct from C */
12020+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12021+
12022+static __always_inline struct thread_info *current_thread_info(void)
12023+{
12024+ return percpu_read_stable(current_tinfo);
12025+}
12026+#endif
12027+
12028 #ifdef CONFIG_X86_32
12029
12030 #define STACK_WARN (THREAD_SIZE/8)
12031@@ -173,35 +184,13 @@ struct thread_info {
12032 */
12033 #ifndef __ASSEMBLY__
12034
12035-
12036 /* how to get the current stack pointer from C */
12037 register unsigned long current_stack_pointer asm("esp") __used;
12038
12039-/* how to get the thread information struct from C */
12040-static inline struct thread_info *current_thread_info(void)
12041-{
12042- return (struct thread_info *)
12043- (current_stack_pointer & ~(THREAD_SIZE - 1));
12044-}
12045-
12046-#else /* !__ASSEMBLY__ */
12047-
12048-/* how to get the thread information struct from ASM */
12049-#define GET_THREAD_INFO(reg) \
12050- movl $-THREAD_SIZE, reg; \
12051- andl %esp, reg
12052-
12053-/* use this one if reg already contains %esp */
12054-#define GET_THREAD_INFO_WITH_ESP(reg) \
12055- andl $-THREAD_SIZE, reg
12056-
12057 #endif
12058
12059 #else /* X86_32 */
12060
12061-#include <asm/percpu.h>
12062-#define KERNEL_STACK_OFFSET (5*8)
12063-
12064 /*
12065 * macros/functions for gaining access to the thread information structure
12066 * preempt_count needs to be 1 initially, until the scheduler is functional.
12067@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12068 #ifndef __ASSEMBLY__
12069 DECLARE_PER_CPU(unsigned long, kernel_stack);
12070
12071-static inline struct thread_info *current_thread_info(void)
12072-{
12073- struct thread_info *ti;
12074- ti = (void *)(percpu_read_stable(kernel_stack) +
12075- KERNEL_STACK_OFFSET - THREAD_SIZE);
12076- return ti;
12077-}
12078-
12079-#else /* !__ASSEMBLY__ */
12080-
12081-/* how to get the thread information struct from ASM */
12082-#define GET_THREAD_INFO(reg) \
12083- movq PER_CPU_VAR(kernel_stack),reg ; \
12084- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12085-
12086+/* how to get the current stack pointer from C */
12087+register unsigned long current_stack_pointer asm("rsp") __used;
12088 #endif
12089
12090 #endif /* !X86_32 */
12091@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12092 extern void free_thread_info(struct thread_info *ti);
12093 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12094 #define arch_task_cache_init arch_task_cache_init
12095+
12096+#define __HAVE_THREAD_FUNCTIONS
12097+#define task_thread_info(task) (&(task)->tinfo)
12098+#define task_stack_page(task) ((task)->stack)
12099+#define setup_thread_stack(p, org) do {} while (0)
12100+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12101+
12102+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12103+extern struct task_struct *alloc_task_struct(void);
12104+extern void free_task_struct(struct task_struct *);
12105+
12106 #endif
12107 #endif /* _ASM_X86_THREAD_INFO_H */
12108diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12109index 61c5874..8a046e9 100644
12110--- a/arch/x86/include/asm/uaccess.h
12111+++ b/arch/x86/include/asm/uaccess.h
12112@@ -8,12 +8,15 @@
12113 #include <linux/thread_info.h>
12114 #include <linux/prefetch.h>
12115 #include <linux/string.h>
12116+#include <linux/sched.h>
12117 #include <asm/asm.h>
12118 #include <asm/page.h>
12119
12120 #define VERIFY_READ 0
12121 #define VERIFY_WRITE 1
12122
12123+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12124+
12125 /*
12126 * The fs value determines whether argument validity checking should be
12127 * performed or not. If get_fs() == USER_DS, checking is performed, with
12128@@ -29,7 +32,12 @@
12129
12130 #define get_ds() (KERNEL_DS)
12131 #define get_fs() (current_thread_info()->addr_limit)
12132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12133+void __set_fs(mm_segment_t x);
12134+void set_fs(mm_segment_t x);
12135+#else
12136 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12137+#endif
12138
12139 #define segment_eq(a, b) ((a).seg == (b).seg)
12140
12141@@ -77,7 +85,33 @@
12142 * checks that the pointer is in the user space range - after calling
12143 * this function, memory access functions may still return -EFAULT.
12144 */
12145-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12146+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12147+#define access_ok(type, addr, size) \
12148+({ \
12149+ long __size = size; \
12150+ unsigned long __addr = (unsigned long)addr; \
12151+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12152+ unsigned long __end_ao = __addr + __size - 1; \
12153+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12154+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12155+ while(__addr_ao <= __end_ao) { \
12156+ char __c_ao; \
12157+ __addr_ao += PAGE_SIZE; \
12158+ if (__size > PAGE_SIZE) \
12159+ cond_resched(); \
12160+ if (__get_user(__c_ao, (char __user *)__addr)) \
12161+ break; \
12162+ if (type != VERIFY_WRITE) { \
12163+ __addr = __addr_ao; \
12164+ continue; \
12165+ } \
12166+ if (__put_user(__c_ao, (char __user *)__addr)) \
12167+ break; \
12168+ __addr = __addr_ao; \
12169+ } \
12170+ } \
12171+ __ret_ao; \
12172+})
12173
12174 /*
12175 * The exception table consists of pairs of addresses: the first is the
12176@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12177 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12178 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12179
12180-
12181+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12182+#define __copyuser_seg "gs;"
12183+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12184+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12185+#else
12186+#define __copyuser_seg
12187+#define __COPYUSER_SET_ES
12188+#define __COPYUSER_RESTORE_ES
12189+#endif
12190
12191 #ifdef CONFIG_X86_32
12192 #define __put_user_asm_u64(x, addr, err, errret) \
12193- asm volatile("1: movl %%eax,0(%2)\n" \
12194- "2: movl %%edx,4(%2)\n" \
12195+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12196+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12197 "3:\n" \
12198 ".section .fixup,\"ax\"\n" \
12199 "4: movl %3,%0\n" \
12200@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12201 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12202
12203 #define __put_user_asm_ex_u64(x, addr) \
12204- asm volatile("1: movl %%eax,0(%1)\n" \
12205- "2: movl %%edx,4(%1)\n" \
12206+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12207+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12208 "3:\n" \
12209 _ASM_EXTABLE(1b, 2b - 1b) \
12210 _ASM_EXTABLE(2b, 3b - 2b) \
12211@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12212 __typeof__(*(ptr)) __pu_val; \
12213 __chk_user_ptr(ptr); \
12214 might_fault(); \
12215- __pu_val = x; \
12216+ __pu_val = (x); \
12217 switch (sizeof(*(ptr))) { \
12218 case 1: \
12219 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12220@@ -374,7 +416,7 @@ do { \
12221 } while (0)
12222
12223 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12224- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12225+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12226 "2:\n" \
12227 ".section .fixup,\"ax\"\n" \
12228 "3: mov %3,%0\n" \
12229@@ -382,7 +424,7 @@ do { \
12230 " jmp 2b\n" \
12231 ".previous\n" \
12232 _ASM_EXTABLE(1b, 3b) \
12233- : "=r" (err), ltype(x) \
12234+ : "=r" (err), ltype (x) \
12235 : "m" (__m(addr)), "i" (errret), "0" (err))
12236
12237 #define __get_user_size_ex(x, ptr, size) \
12238@@ -407,7 +449,7 @@ do { \
12239 } while (0)
12240
12241 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12242- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12243+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12244 "2:\n" \
12245 _ASM_EXTABLE(1b, 2b - 1b) \
12246 : ltype(x) : "m" (__m(addr)))
12247@@ -424,13 +466,24 @@ do { \
12248 int __gu_err; \
12249 unsigned long __gu_val; \
12250 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12251- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12252+ (x) = (__typeof__(*(ptr)))__gu_val; \
12253 __gu_err; \
12254 })
12255
12256 /* FIXME: this hack is definitely wrong -AK */
12257 struct __large_struct { unsigned long buf[100]; };
12258-#define __m(x) (*(struct __large_struct __user *)(x))
12259+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12260+#define ____m(x) \
12261+({ \
12262+ unsigned long ____x = (unsigned long)(x); \
12263+ if (____x < PAX_USER_SHADOW_BASE) \
12264+ ____x += PAX_USER_SHADOW_BASE; \
12265+ (void __user *)____x; \
12266+})
12267+#else
12268+#define ____m(x) (x)
12269+#endif
12270+#define __m(x) (*(struct __large_struct __user *)____m(x))
12271
12272 /*
12273 * Tell gcc we read from memory instead of writing: this is because
12274@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12275 * aliasing issues.
12276 */
12277 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12278- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12279+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12280 "2:\n" \
12281 ".section .fixup,\"ax\"\n" \
12282 "3: mov %3,%0\n" \
12283@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12284 ".previous\n" \
12285 _ASM_EXTABLE(1b, 3b) \
12286 : "=r"(err) \
12287- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12288+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12289
12290 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12291- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12292+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12293 "2:\n" \
12294 _ASM_EXTABLE(1b, 2b - 1b) \
12295 : : ltype(x), "m" (__m(addr)))
12296@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12297 * On error, the variable @x is set to zero.
12298 */
12299
12300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12301+#define __get_user(x, ptr) get_user((x), (ptr))
12302+#else
12303 #define __get_user(x, ptr) \
12304 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12305+#endif
12306
12307 /**
12308 * __put_user: - Write a simple value into user space, with less checking.
12309@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12310 * Returns zero on success, or -EFAULT on error.
12311 */
12312
12313+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12314+#define __put_user(x, ptr) put_user((x), (ptr))
12315+#else
12316 #define __put_user(x, ptr) \
12317 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12318+#endif
12319
12320 #define __get_user_unaligned __get_user
12321 #define __put_user_unaligned __put_user
12322@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12323 #define get_user_ex(x, ptr) do { \
12324 unsigned long __gue_val; \
12325 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12326- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12327+ (x) = (__typeof__(*(ptr)))__gue_val; \
12328 } while (0)
12329
12330 #ifdef CONFIG_X86_WP_WORKS_OK
12331@@ -567,6 +628,7 @@ extern struct movsl_mask {
12332
12333 #define ARCH_HAS_NOCACHE_UACCESS 1
12334
12335+#define ARCH_HAS_SORT_EXTABLE
12336 #ifdef CONFIG_X86_32
12337 # include "uaccess_32.h"
12338 #else
12339diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12340index 632fb44..e30e334 100644
12341--- a/arch/x86/include/asm/uaccess_32.h
12342+++ b/arch/x86/include/asm/uaccess_32.h
12343@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12344 static __always_inline unsigned long __must_check
12345 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12346 {
12347+ pax_track_stack();
12348+
12349+ if ((long)n < 0)
12350+ return n;
12351+
12352 if (__builtin_constant_p(n)) {
12353 unsigned long ret;
12354
12355@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12356 return ret;
12357 }
12358 }
12359+ if (!__builtin_constant_p(n))
12360+ check_object_size(from, n, true);
12361 return __copy_to_user_ll(to, from, n);
12362 }
12363
12364@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12365 __copy_to_user(void __user *to, const void *from, unsigned long n)
12366 {
12367 might_fault();
12368+
12369 return __copy_to_user_inatomic(to, from, n);
12370 }
12371
12372 static __always_inline unsigned long
12373 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12374 {
12375+ if ((long)n < 0)
12376+ return n;
12377+
12378 /* Avoid zeroing the tail if the copy fails..
12379 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12380 * but as the zeroing behaviour is only significant when n is not
12381@@ -138,6 +149,12 @@ static __always_inline unsigned long
12382 __copy_from_user(void *to, const void __user *from, unsigned long n)
12383 {
12384 might_fault();
12385+
12386+ pax_track_stack();
12387+
12388+ if ((long)n < 0)
12389+ return n;
12390+
12391 if (__builtin_constant_p(n)) {
12392 unsigned long ret;
12393
12394@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12395 return ret;
12396 }
12397 }
12398+ if (!__builtin_constant_p(n))
12399+ check_object_size(to, n, false);
12400 return __copy_from_user_ll(to, from, n);
12401 }
12402
12403@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12404 const void __user *from, unsigned long n)
12405 {
12406 might_fault();
12407+
12408+ if ((long)n < 0)
12409+ return n;
12410+
12411 if (__builtin_constant_p(n)) {
12412 unsigned long ret;
12413
12414@@ -182,14 +205,62 @@ static __always_inline unsigned long
12415 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12416 unsigned long n)
12417 {
12418- return __copy_from_user_ll_nocache_nozero(to, from, n);
12419+ if ((long)n < 0)
12420+ return n;
12421+
12422+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12423+}
12424+
12425+/**
12426+ * copy_to_user: - Copy a block of data into user space.
12427+ * @to: Destination address, in user space.
12428+ * @from: Source address, in kernel space.
12429+ * @n: Number of bytes to copy.
12430+ *
12431+ * Context: User context only. This function may sleep.
12432+ *
12433+ * Copy data from kernel space to user space.
12434+ *
12435+ * Returns number of bytes that could not be copied.
12436+ * On success, this will be zero.
12437+ */
12438+static __always_inline unsigned long __must_check
12439+copy_to_user(void __user *to, const void *from, unsigned long n)
12440+{
12441+ if (access_ok(VERIFY_WRITE, to, n))
12442+ n = __copy_to_user(to, from, n);
12443+ return n;
12444+}
12445+
12446+/**
12447+ * copy_from_user: - Copy a block of data from user space.
12448+ * @to: Destination address, in kernel space.
12449+ * @from: Source address, in user space.
12450+ * @n: Number of bytes to copy.
12451+ *
12452+ * Context: User context only. This function may sleep.
12453+ *
12454+ * Copy data from user space to kernel space.
12455+ *
12456+ * Returns number of bytes that could not be copied.
12457+ * On success, this will be zero.
12458+ *
12459+ * If some data could not be copied, this function will pad the copied
12460+ * data to the requested size using zero bytes.
12461+ */
12462+static __always_inline unsigned long __must_check
12463+copy_from_user(void *to, const void __user *from, unsigned long n)
12464+{
12465+ if (access_ok(VERIFY_READ, from, n))
12466+ n = __copy_from_user(to, from, n);
12467+ else if ((long)n > 0) {
12468+ if (!__builtin_constant_p(n))
12469+ check_object_size(to, n, false);
12470+ memset(to, 0, n);
12471+ }
12472+ return n;
12473 }
12474
12475-unsigned long __must_check copy_to_user(void __user *to,
12476- const void *from, unsigned long n);
12477-unsigned long __must_check copy_from_user(void *to,
12478- const void __user *from,
12479- unsigned long n);
12480 long __must_check strncpy_from_user(char *dst, const char __user *src,
12481 long count);
12482 long __must_check __strncpy_from_user(char *dst,
12483diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12484index db24b21..72a9dfc 100644
12485--- a/arch/x86/include/asm/uaccess_64.h
12486+++ b/arch/x86/include/asm/uaccess_64.h
12487@@ -9,6 +9,9 @@
12488 #include <linux/prefetch.h>
12489 #include <linux/lockdep.h>
12490 #include <asm/page.h>
12491+#include <asm/pgtable.h>
12492+
12493+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12494
12495 /*
12496 * Copy To/From Userspace
12497@@ -19,113 +22,203 @@ __must_check unsigned long
12498 copy_user_generic(void *to, const void *from, unsigned len);
12499
12500 __must_check unsigned long
12501-copy_to_user(void __user *to, const void *from, unsigned len);
12502-__must_check unsigned long
12503-copy_from_user(void *to, const void __user *from, unsigned len);
12504-__must_check unsigned long
12505 copy_in_user(void __user *to, const void __user *from, unsigned len);
12506
12507 static __always_inline __must_check
12508-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12509+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
12510 {
12511- int ret = 0;
12512+ unsigned ret = 0;
12513
12514 might_fault();
12515- if (!__builtin_constant_p(size))
12516- return copy_user_generic(dst, (__force void *)src, size);
12517+
12518+ if ((int)size < 0)
12519+ return size;
12520+
12521+#ifdef CONFIG_PAX_MEMORY_UDEREF
12522+ if (!__access_ok(VERIFY_READ, src, size))
12523+ return size;
12524+#endif
12525+
12526+ if (!__builtin_constant_p(size)) {
12527+ check_object_size(dst, size, false);
12528+
12529+#ifdef CONFIG_PAX_MEMORY_UDEREF
12530+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12531+ src += PAX_USER_SHADOW_BASE;
12532+#endif
12533+
12534+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12535+ }
12536 switch (size) {
12537- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12538+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12539 ret, "b", "b", "=q", 1);
12540 return ret;
12541- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12542+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12543 ret, "w", "w", "=r", 2);
12544 return ret;
12545- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12546+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12547 ret, "l", "k", "=r", 4);
12548 return ret;
12549- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12550+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12551 ret, "q", "", "=r", 8);
12552 return ret;
12553 case 10:
12554- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12555+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12556 ret, "q", "", "=r", 10);
12557 if (unlikely(ret))
12558 return ret;
12559 __get_user_asm(*(u16 *)(8 + (char *)dst),
12560- (u16 __user *)(8 + (char __user *)src),
12561+ (const u16 __user *)(8 + (const char __user *)src),
12562 ret, "w", "w", "=r", 2);
12563 return ret;
12564 case 16:
12565- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12566+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12567 ret, "q", "", "=r", 16);
12568 if (unlikely(ret))
12569 return ret;
12570 __get_user_asm(*(u64 *)(8 + (char *)dst),
12571- (u64 __user *)(8 + (char __user *)src),
12572+ (const u64 __user *)(8 + (const char __user *)src),
12573 ret, "q", "", "=r", 8);
12574 return ret;
12575 default:
12576- return copy_user_generic(dst, (__force void *)src, size);
12577+
12578+#ifdef CONFIG_PAX_MEMORY_UDEREF
12579+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12580+ src += PAX_USER_SHADOW_BASE;
12581+#endif
12582+
12583+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12584 }
12585 }
12586
12587 static __always_inline __must_check
12588-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12589+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
12590 {
12591- int ret = 0;
12592+ unsigned ret = 0;
12593
12594 might_fault();
12595- if (!__builtin_constant_p(size))
12596- return copy_user_generic((__force void *)dst, src, size);
12597+
12598+ pax_track_stack();
12599+
12600+ if ((int)size < 0)
12601+ return size;
12602+
12603+#ifdef CONFIG_PAX_MEMORY_UDEREF
12604+ if (!__access_ok(VERIFY_WRITE, dst, size))
12605+ return size;
12606+#endif
12607+
12608+ if (!__builtin_constant_p(size)) {
12609+ check_object_size(src, size, true);
12610+
12611+#ifdef CONFIG_PAX_MEMORY_UDEREF
12612+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12613+ dst += PAX_USER_SHADOW_BASE;
12614+#endif
12615+
12616+ return copy_user_generic((__force_kernel void *)dst, src, size);
12617+ }
12618 switch (size) {
12619- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12620+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12621 ret, "b", "b", "iq", 1);
12622 return ret;
12623- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12624+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12625 ret, "w", "w", "ir", 2);
12626 return ret;
12627- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12628+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12629 ret, "l", "k", "ir", 4);
12630 return ret;
12631- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12632+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12633 ret, "q", "", "er", 8);
12634 return ret;
12635 case 10:
12636- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12637+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12638 ret, "q", "", "er", 10);
12639 if (unlikely(ret))
12640 return ret;
12641 asm("":::"memory");
12642- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12643+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12644 ret, "w", "w", "ir", 2);
12645 return ret;
12646 case 16:
12647- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12648+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12649 ret, "q", "", "er", 16);
12650 if (unlikely(ret))
12651 return ret;
12652 asm("":::"memory");
12653- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12654+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12655 ret, "q", "", "er", 8);
12656 return ret;
12657 default:
12658- return copy_user_generic((__force void *)dst, src, size);
12659+
12660+#ifdef CONFIG_PAX_MEMORY_UDEREF
12661+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12662+ dst += PAX_USER_SHADOW_BASE;
12663+#endif
12664+
12665+ return copy_user_generic((__force_kernel void *)dst, src, size);
12666 }
12667 }
12668
12669 static __always_inline __must_check
12670-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12671+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
12672 {
12673- int ret = 0;
12674+ if (access_ok(VERIFY_WRITE, to, len))
12675+ len = __copy_to_user(to, from, len);
12676+ return len;
12677+}
12678+
12679+static __always_inline __must_check
12680+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
12681+{
12682+ if ((int)len < 0)
12683+ return len;
12684+
12685+ if (access_ok(VERIFY_READ, from, len))
12686+ len = __copy_from_user(to, from, len);
12687+ else if ((int)len > 0) {
12688+ if (!__builtin_constant_p(len))
12689+ check_object_size(to, len, false);
12690+ memset(to, 0, len);
12691+ }
12692+ return len;
12693+}
12694+
12695+static __always_inline __must_check
12696+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12697+{
12698+ unsigned ret = 0;
12699
12700 might_fault();
12701- if (!__builtin_constant_p(size))
12702- return copy_user_generic((__force void *)dst,
12703- (__force void *)src, size);
12704+
12705+ pax_track_stack();
12706+
12707+ if ((int)size < 0)
12708+ return size;
12709+
12710+#ifdef CONFIG_PAX_MEMORY_UDEREF
12711+ if (!__access_ok(VERIFY_READ, src, size))
12712+ return size;
12713+ if (!__access_ok(VERIFY_WRITE, dst, size))
12714+ return size;
12715+#endif
12716+
12717+ if (!__builtin_constant_p(size)) {
12718+
12719+#ifdef CONFIG_PAX_MEMORY_UDEREF
12720+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12721+ src += PAX_USER_SHADOW_BASE;
12722+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12723+ dst += PAX_USER_SHADOW_BASE;
12724+#endif
12725+
12726+ return copy_user_generic((__force_kernel void *)dst,
12727+ (__force_kernel const void *)src, size);
12728+ }
12729 switch (size) {
12730 case 1: {
12731 u8 tmp;
12732- __get_user_asm(tmp, (u8 __user *)src,
12733+ __get_user_asm(tmp, (const u8 __user *)src,
12734 ret, "b", "b", "=q", 1);
12735 if (likely(!ret))
12736 __put_user_asm(tmp, (u8 __user *)dst,
12737@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12738 }
12739 case 2: {
12740 u16 tmp;
12741- __get_user_asm(tmp, (u16 __user *)src,
12742+ __get_user_asm(tmp, (const u16 __user *)src,
12743 ret, "w", "w", "=r", 2);
12744 if (likely(!ret))
12745 __put_user_asm(tmp, (u16 __user *)dst,
12746@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12747
12748 case 4: {
12749 u32 tmp;
12750- __get_user_asm(tmp, (u32 __user *)src,
12751+ __get_user_asm(tmp, (const u32 __user *)src,
12752 ret, "l", "k", "=r", 4);
12753 if (likely(!ret))
12754 __put_user_asm(tmp, (u32 __user *)dst,
12755@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12756 }
12757 case 8: {
12758 u64 tmp;
12759- __get_user_asm(tmp, (u64 __user *)src,
12760+ __get_user_asm(tmp, (const u64 __user *)src,
12761 ret, "q", "", "=r", 8);
12762 if (likely(!ret))
12763 __put_user_asm(tmp, (u64 __user *)dst,
12764@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12765 return ret;
12766 }
12767 default:
12768- return copy_user_generic((__force void *)dst,
12769- (__force void *)src, size);
12770+
12771+#ifdef CONFIG_PAX_MEMORY_UDEREF
12772+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12773+ src += PAX_USER_SHADOW_BASE;
12774+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12775+ dst += PAX_USER_SHADOW_BASE;
12776+#endif
12777+
12778+ return copy_user_generic((__force_kernel void *)dst,
12779+ (__force_kernel const void *)src, size);
12780 }
12781 }
12782
12783@@ -176,33 +277,75 @@ __must_check long strlen_user(const char __user *str);
12784 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12785 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12786
12787-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12788- unsigned size);
12789+static __must_check __always_inline unsigned long
12790+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12791+{
12792+ pax_track_stack();
12793+
12794+ if ((int)size < 0)
12795+ return size;
12796+
12797+#ifdef CONFIG_PAX_MEMORY_UDEREF
12798+ if (!__access_ok(VERIFY_READ, src, size))
12799+ return size;
12800
12801-static __must_check __always_inline int
12802+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12803+ src += PAX_USER_SHADOW_BASE;
12804+#endif
12805+
12806+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12807+}
12808+
12809+static __must_check __always_inline unsigned long
12810 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12811 {
12812- return copy_user_generic((__force void *)dst, src, size);
12813+ if ((int)size < 0)
12814+ return size;
12815+
12816+#ifdef CONFIG_PAX_MEMORY_UDEREF
12817+ if (!__access_ok(VERIFY_WRITE, dst, size))
12818+ return size;
12819+
12820+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12821+ dst += PAX_USER_SHADOW_BASE;
12822+#endif
12823+
12824+ return copy_user_generic((__force_kernel void *)dst, src, size);
12825 }
12826
12827-extern long __copy_user_nocache(void *dst, const void __user *src,
12828+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12829 unsigned size, int zerorest);
12830
12831-static inline int
12832-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12833+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12834 {
12835 might_sleep();
12836+
12837+ if ((int)size < 0)
12838+ return size;
12839+
12840+#ifdef CONFIG_PAX_MEMORY_UDEREF
12841+ if (!__access_ok(VERIFY_READ, src, size))
12842+ return size;
12843+#endif
12844+
12845 return __copy_user_nocache(dst, src, size, 1);
12846 }
12847
12848-static inline int
12849-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12850+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12851 unsigned size)
12852 {
12853+ if ((int)size < 0)
12854+ return size;
12855+
12856+#ifdef CONFIG_PAX_MEMORY_UDEREF
12857+ if (!__access_ok(VERIFY_READ, src, size))
12858+ return size;
12859+#endif
12860+
12861 return __copy_user_nocache(dst, src, size, 0);
12862 }
12863
12864-unsigned long
12865-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12866+extern unsigned long
12867+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
12868
12869 #endif /* _ASM_X86_UACCESS_64_H */
12870diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12871index 9064052..786cfbc 100644
12872--- a/arch/x86/include/asm/vdso.h
12873+++ b/arch/x86/include/asm/vdso.h
12874@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12875 #define VDSO32_SYMBOL(base, name) \
12876 ({ \
12877 extern const char VDSO32_##name[]; \
12878- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12879+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12880 })
12881 #endif
12882
12883diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12884index 3d61e20..9507180 100644
12885--- a/arch/x86/include/asm/vgtod.h
12886+++ b/arch/x86/include/asm/vgtod.h
12887@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12888 int sysctl_enabled;
12889 struct timezone sys_tz;
12890 struct { /* extract of a clocksource struct */
12891+ char name[8];
12892 cycle_t (*vread)(void);
12893 cycle_t cycle_last;
12894 cycle_t mask;
12895diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12896index 61e08c0..b0da582 100644
12897--- a/arch/x86/include/asm/vmi.h
12898+++ b/arch/x86/include/asm/vmi.h
12899@@ -191,6 +191,7 @@ struct vrom_header {
12900 u8 reserved[96]; /* Reserved for headers */
12901 char vmi_init[8]; /* VMI_Init jump point */
12902 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12903+ char rom_data[8048]; /* rest of the option ROM */
12904 } __attribute__((packed));
12905
12906 struct pnp_header {
12907diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12908index c6e0bee..fcb9f74 100644
12909--- a/arch/x86/include/asm/vmi_time.h
12910+++ b/arch/x86/include/asm/vmi_time.h
12911@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12912 int (*wallclock_updated)(void);
12913 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12914 void (*cancel_alarm)(u32 flags);
12915-} vmi_timer_ops;
12916+} __no_const vmi_timer_ops;
12917
12918 /* Prototypes */
12919 extern void __init vmi_time_init(void);
12920diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12921index d0983d2..1f7c9e9 100644
12922--- a/arch/x86/include/asm/vsyscall.h
12923+++ b/arch/x86/include/asm/vsyscall.h
12924@@ -15,9 +15,10 @@ enum vsyscall_num {
12925
12926 #ifdef __KERNEL__
12927 #include <linux/seqlock.h>
12928+#include <linux/getcpu.h>
12929+#include <linux/time.h>
12930
12931 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12932-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12933
12934 /* Definitions for CONFIG_GENERIC_TIME definitions */
12935 #define __section_vsyscall_gtod_data __attribute__ \
12936@@ -31,7 +32,6 @@ enum vsyscall_num {
12937 #define VGETCPU_LSL 2
12938
12939 extern int __vgetcpu_mode;
12940-extern volatile unsigned long __jiffies;
12941
12942 /* kernel space (writeable) */
12943 extern int vgetcpu_mode;
12944@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12945
12946 extern void map_vsyscall(void);
12947
12948+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12949+extern time_t vtime(time_t *t);
12950+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12951 #endif /* __KERNEL__ */
12952
12953 #endif /* _ASM_X86_VSYSCALL_H */
12954diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12955index 2c756fd..3377e37 100644
12956--- a/arch/x86/include/asm/x86_init.h
12957+++ b/arch/x86/include/asm/x86_init.h
12958@@ -28,7 +28,7 @@ struct x86_init_mpparse {
12959 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12960 void (*find_smp_config)(unsigned int reserve);
12961 void (*get_smp_config)(unsigned int early);
12962-};
12963+} __no_const;
12964
12965 /**
12966 * struct x86_init_resources - platform specific resource related ops
12967@@ -42,7 +42,7 @@ struct x86_init_resources {
12968 void (*probe_roms)(void);
12969 void (*reserve_resources)(void);
12970 char *(*memory_setup)(void);
12971-};
12972+} __no_const;
12973
12974 /**
12975 * struct x86_init_irqs - platform specific interrupt setup
12976@@ -55,7 +55,7 @@ struct x86_init_irqs {
12977 void (*pre_vector_init)(void);
12978 void (*intr_init)(void);
12979 void (*trap_init)(void);
12980-};
12981+} __no_const;
12982
12983 /**
12984 * struct x86_init_oem - oem platform specific customizing functions
12985@@ -65,7 +65,7 @@ struct x86_init_irqs {
12986 struct x86_init_oem {
12987 void (*arch_setup)(void);
12988 void (*banner)(void);
12989-};
12990+} __no_const;
12991
12992 /**
12993 * struct x86_init_paging - platform specific paging functions
12994@@ -75,7 +75,7 @@ struct x86_init_oem {
12995 struct x86_init_paging {
12996 void (*pagetable_setup_start)(pgd_t *base);
12997 void (*pagetable_setup_done)(pgd_t *base);
12998-};
12999+} __no_const;
13000
13001 /**
13002 * struct x86_init_timers - platform specific timer setup
13003@@ -88,7 +88,7 @@ struct x86_init_timers {
13004 void (*setup_percpu_clockev)(void);
13005 void (*tsc_pre_init)(void);
13006 void (*timer_init)(void);
13007-};
13008+} __no_const;
13009
13010 /**
13011 * struct x86_init_ops - functions for platform specific setup
13012@@ -101,7 +101,7 @@ struct x86_init_ops {
13013 struct x86_init_oem oem;
13014 struct x86_init_paging paging;
13015 struct x86_init_timers timers;
13016-};
13017+} __no_const;
13018
13019 /**
13020 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13021@@ -109,7 +109,7 @@ struct x86_init_ops {
13022 */
13023 struct x86_cpuinit_ops {
13024 void (*setup_percpu_clockev)(void);
13025-};
13026+} __no_const;
13027
13028 /**
13029 * struct x86_platform_ops - platform specific runtime functions
13030@@ -121,7 +121,7 @@ struct x86_platform_ops {
13031 unsigned long (*calibrate_tsc)(void);
13032 unsigned long (*get_wallclock)(void);
13033 int (*set_wallclock)(unsigned long nowtime);
13034-};
13035+} __no_const;
13036
13037 extern struct x86_init_ops x86_init;
13038 extern struct x86_cpuinit_ops x86_cpuinit;
13039diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13040index 727acc1..554f3eb 100644
13041--- a/arch/x86/include/asm/xsave.h
13042+++ b/arch/x86/include/asm/xsave.h
13043@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13044 static inline int xsave_user(struct xsave_struct __user *buf)
13045 {
13046 int err;
13047+
13048+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13049+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13050+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13051+#endif
13052+
13053 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13054 "2:\n"
13055 ".section .fixup,\"ax\"\n"
13056@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13057 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13058 {
13059 int err;
13060- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13061+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13062 u32 lmask = mask;
13063 u32 hmask = mask >> 32;
13064
13065+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13066+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13067+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13068+#endif
13069+
13070 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13071 "2:\n"
13072 ".section .fixup,\"ax\"\n"
13073diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13074index 6a564ac..9b1340c 100644
13075--- a/arch/x86/kernel/acpi/realmode/Makefile
13076+++ b/arch/x86/kernel/acpi/realmode/Makefile
13077@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13078 $(call cc-option, -fno-stack-protector) \
13079 $(call cc-option, -mpreferred-stack-boundary=2)
13080 KBUILD_CFLAGS += $(call cc-option, -m32)
13081+ifdef CONSTIFY_PLUGIN
13082+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13083+endif
13084 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13085 GCOV_PROFILE := n
13086
13087diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13088index 580b4e2..d4129e4 100644
13089--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13090+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13091@@ -91,6 +91,9 @@ _start:
13092 /* Do any other stuff... */
13093
13094 #ifndef CONFIG_64BIT
13095+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13096+ call verify_cpu
13097+
13098 /* This could also be done in C code... */
13099 movl pmode_cr3, %eax
13100 movl %eax, %cr3
13101@@ -104,7 +107,7 @@ _start:
13102 movl %eax, %ecx
13103 orl %edx, %ecx
13104 jz 1f
13105- movl $0xc0000080, %ecx
13106+ mov $MSR_EFER, %ecx
13107 wrmsr
13108 1:
13109
13110@@ -114,6 +117,7 @@ _start:
13111 movl pmode_cr0, %eax
13112 movl %eax, %cr0
13113 jmp pmode_return
13114+# include "../../verify_cpu.S"
13115 #else
13116 pushw $0
13117 pushw trampoline_segment
13118diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13119index ca93638..7042f24 100644
13120--- a/arch/x86/kernel/acpi/sleep.c
13121+++ b/arch/x86/kernel/acpi/sleep.c
13122@@ -11,11 +11,12 @@
13123 #include <linux/cpumask.h>
13124 #include <asm/segment.h>
13125 #include <asm/desc.h>
13126+#include <asm/e820.h>
13127
13128 #include "realmode/wakeup.h"
13129 #include "sleep.h"
13130
13131-unsigned long acpi_wakeup_address;
13132+unsigned long acpi_wakeup_address = 0x2000;
13133 unsigned long acpi_realmode_flags;
13134
13135 /* address in low memory of the wakeup routine. */
13136@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13137 #else /* CONFIG_64BIT */
13138 header->trampoline_segment = setup_trampoline() >> 4;
13139 #ifdef CONFIG_SMP
13140- stack_start.sp = temp_stack + sizeof(temp_stack);
13141+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13142+
13143+ pax_open_kernel();
13144 early_gdt_descr.address =
13145 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13146+ pax_close_kernel();
13147+
13148 initial_gs = per_cpu_offset(smp_processor_id());
13149 #endif
13150 initial_code = (unsigned long)wakeup_long64;
13151@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13152 return;
13153 }
13154
13155- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13156-
13157- if (!acpi_realmode) {
13158- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13159- return;
13160- }
13161-
13162- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13163+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13164+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13165 }
13166
13167
13168diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13169index 8ded418..079961e 100644
13170--- a/arch/x86/kernel/acpi/wakeup_32.S
13171+++ b/arch/x86/kernel/acpi/wakeup_32.S
13172@@ -30,13 +30,11 @@ wakeup_pmode_return:
13173 # and restore the stack ... but you need gdt for this to work
13174 movl saved_context_esp, %esp
13175
13176- movl %cs:saved_magic, %eax
13177- cmpl $0x12345678, %eax
13178+ cmpl $0x12345678, saved_magic
13179 jne bogus_magic
13180
13181 # jump to place where we left off
13182- movl saved_eip, %eax
13183- jmp *%eax
13184+ jmp *(saved_eip)
13185
13186 bogus_magic:
13187 jmp bogus_magic
13188diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13189index de7353c..075da5f 100644
13190--- a/arch/x86/kernel/alternative.c
13191+++ b/arch/x86/kernel/alternative.c
13192@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13193
13194 BUG_ON(p->len > MAX_PATCH_LEN);
13195 /* prep the buffer with the original instructions */
13196- memcpy(insnbuf, p->instr, p->len);
13197+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13198 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13199 (unsigned long)p->instr, p->len);
13200
13201@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13202 if (smp_alt_once)
13203 free_init_pages("SMP alternatives",
13204 (unsigned long)__smp_locks,
13205- (unsigned long)__smp_locks_end);
13206+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13207
13208 restart_nmi();
13209 }
13210@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13211 * instructions. And on the local CPU you need to be protected again NMI or MCE
13212 * handlers seeing an inconsistent instruction while you patch.
13213 */
13214-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13215+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13216 size_t len)
13217 {
13218 unsigned long flags;
13219 local_irq_save(flags);
13220- memcpy(addr, opcode, len);
13221+
13222+ pax_open_kernel();
13223+ memcpy(ktla_ktva(addr), opcode, len);
13224 sync_core();
13225+ pax_close_kernel();
13226+
13227 local_irq_restore(flags);
13228 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13229 that causes hangs on some VIA CPUs. */
13230@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13231 */
13232 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13233 {
13234- unsigned long flags;
13235- char *vaddr;
13236+ unsigned char *vaddr = ktla_ktva(addr);
13237 struct page *pages[2];
13238- int i;
13239+ size_t i;
13240
13241 if (!core_kernel_text((unsigned long)addr)) {
13242- pages[0] = vmalloc_to_page(addr);
13243- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13244+ pages[0] = vmalloc_to_page(vaddr);
13245+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13246 } else {
13247- pages[0] = virt_to_page(addr);
13248+ pages[0] = virt_to_page(vaddr);
13249 WARN_ON(!PageReserved(pages[0]));
13250- pages[1] = virt_to_page(addr + PAGE_SIZE);
13251+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13252 }
13253 BUG_ON(!pages[0]);
13254- local_irq_save(flags);
13255- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13256- if (pages[1])
13257- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13258- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13259- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13260- clear_fixmap(FIX_TEXT_POKE0);
13261- if (pages[1])
13262- clear_fixmap(FIX_TEXT_POKE1);
13263- local_flush_tlb();
13264- sync_core();
13265- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13266- that causes hangs on some VIA CPUs. */
13267+ text_poke_early(addr, opcode, len);
13268 for (i = 0; i < len; i++)
13269- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13270- local_irq_restore(flags);
13271+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13272 return addr;
13273 }
13274diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13275index 3a44b75..1601800 100644
13276--- a/arch/x86/kernel/amd_iommu.c
13277+++ b/arch/x86/kernel/amd_iommu.c
13278@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13279 }
13280 }
13281
13282-static struct dma_map_ops amd_iommu_dma_ops = {
13283+static const struct dma_map_ops amd_iommu_dma_ops = {
13284 .alloc_coherent = alloc_coherent,
13285 .free_coherent = free_coherent,
13286 .map_page = map_page,
13287diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13288index 1d2d670..8e3f477 100644
13289--- a/arch/x86/kernel/apic/apic.c
13290+++ b/arch/x86/kernel/apic/apic.c
13291@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13292 /*
13293 * Debug level, exported for io_apic.c
13294 */
13295-unsigned int apic_verbosity;
13296+int apic_verbosity;
13297
13298 int pic_mode;
13299
13300@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13301 apic_write(APIC_ESR, 0);
13302 v1 = apic_read(APIC_ESR);
13303 ack_APIC_irq();
13304- atomic_inc(&irq_err_count);
13305+ atomic_inc_unchecked(&irq_err_count);
13306
13307 /*
13308 * Here is what the APIC error bits mean:
13309@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13310 u16 *bios_cpu_apicid;
13311 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13312
13313+ pax_track_stack();
13314+
13315 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13316 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13317
13318diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13319index 8928d97..f799cea 100644
13320--- a/arch/x86/kernel/apic/io_apic.c
13321+++ b/arch/x86/kernel/apic/io_apic.c
13322@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13323 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13324 GFP_ATOMIC);
13325 if (!ioapic_entries)
13326- return 0;
13327+ return NULL;
13328
13329 for (apic = 0; apic < nr_ioapics; apic++) {
13330 ioapic_entries[apic] =
13331@@ -733,7 +733,7 @@ nomem:
13332 kfree(ioapic_entries[apic]);
13333 kfree(ioapic_entries);
13334
13335- return 0;
13336+ return NULL;
13337 }
13338
13339 /*
13340@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13341 }
13342 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13343
13344-void lock_vector_lock(void)
13345+void lock_vector_lock(void) __acquires(vector_lock)
13346 {
13347 /* Used to the online set of cpus does not change
13348 * during assign_irq_vector.
13349@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13350 spin_lock(&vector_lock);
13351 }
13352
13353-void unlock_vector_lock(void)
13354+void unlock_vector_lock(void) __releases(vector_lock)
13355 {
13356 spin_unlock(&vector_lock);
13357 }
13358@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13359 ack_APIC_irq();
13360 }
13361
13362-atomic_t irq_mis_count;
13363+atomic_unchecked_t irq_mis_count;
13364
13365 static void ack_apic_level(unsigned int irq)
13366 {
13367@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13368
13369 /* Tail end of version 0x11 I/O APIC bug workaround */
13370 if (!(v & (1 << (i & 0x1f)))) {
13371- atomic_inc(&irq_mis_count);
13372+ atomic_inc_unchecked(&irq_mis_count);
13373 spin_lock(&ioapic_lock);
13374 __mask_and_edge_IO_APIC_irq(cfg);
13375 __unmask_and_level_IO_APIC_irq(cfg);
13376diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13377index 151ace6..f317474 100644
13378--- a/arch/x86/kernel/apm_32.c
13379+++ b/arch/x86/kernel/apm_32.c
13380@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13381 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13382 * even though they are called in protected mode.
13383 */
13384-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13385+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13386 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13387
13388 static const char driver_version[] = "1.16ac"; /* no spaces */
13389@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13390 BUG_ON(cpu != 0);
13391 gdt = get_cpu_gdt_table(cpu);
13392 save_desc_40 = gdt[0x40 / 8];
13393+
13394+ pax_open_kernel();
13395 gdt[0x40 / 8] = bad_bios_desc;
13396+ pax_close_kernel();
13397
13398 apm_irq_save(flags);
13399 APM_DO_SAVE_SEGS;
13400@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13401 &call->esi);
13402 APM_DO_RESTORE_SEGS;
13403 apm_irq_restore(flags);
13404+
13405+ pax_open_kernel();
13406 gdt[0x40 / 8] = save_desc_40;
13407+ pax_close_kernel();
13408+
13409 put_cpu();
13410
13411 return call->eax & 0xff;
13412@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13413 BUG_ON(cpu != 0);
13414 gdt = get_cpu_gdt_table(cpu);
13415 save_desc_40 = gdt[0x40 / 8];
13416+
13417+ pax_open_kernel();
13418 gdt[0x40 / 8] = bad_bios_desc;
13419+ pax_close_kernel();
13420
13421 apm_irq_save(flags);
13422 APM_DO_SAVE_SEGS;
13423@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13424 &call->eax);
13425 APM_DO_RESTORE_SEGS;
13426 apm_irq_restore(flags);
13427+
13428+ pax_open_kernel();
13429 gdt[0x40 / 8] = save_desc_40;
13430+ pax_close_kernel();
13431+
13432 put_cpu();
13433 return error;
13434 }
13435@@ -975,7 +989,7 @@ recalc:
13436
13437 static void apm_power_off(void)
13438 {
13439- unsigned char po_bios_call[] = {
13440+ const unsigned char po_bios_call[] = {
13441 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13442 0x8e, 0xd0, /* movw ax,ss */
13443 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13444@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13445 * code to that CPU.
13446 */
13447 gdt = get_cpu_gdt_table(0);
13448+
13449+ pax_open_kernel();
13450 set_desc_base(&gdt[APM_CS >> 3],
13451 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13452 set_desc_base(&gdt[APM_CS_16 >> 3],
13453 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13454 set_desc_base(&gdt[APM_DS >> 3],
13455 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13456+ pax_close_kernel();
13457
13458 proc_create("apm", 0, NULL, &apm_file_ops);
13459
13460diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13461index dfdbf64..9b2b6ce 100644
13462--- a/arch/x86/kernel/asm-offsets_32.c
13463+++ b/arch/x86/kernel/asm-offsets_32.c
13464@@ -51,7 +51,6 @@ void foo(void)
13465 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13466 BLANK();
13467
13468- OFFSET(TI_task, thread_info, task);
13469 OFFSET(TI_exec_domain, thread_info, exec_domain);
13470 OFFSET(TI_flags, thread_info, flags);
13471 OFFSET(TI_status, thread_info, status);
13472@@ -60,6 +59,8 @@ void foo(void)
13473 OFFSET(TI_restart_block, thread_info, restart_block);
13474 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13475 OFFSET(TI_cpu, thread_info, cpu);
13476+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13477+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13478 BLANK();
13479
13480 OFFSET(GDS_size, desc_ptr, size);
13481@@ -99,6 +100,7 @@ void foo(void)
13482
13483 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13484 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13485+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13486 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13487 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13488 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13489@@ -115,6 +117,11 @@ void foo(void)
13490 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13491 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13492 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13493+
13494+#ifdef CONFIG_PAX_KERNEXEC
13495+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13496+#endif
13497+
13498 #endif
13499
13500 #ifdef CONFIG_XEN
13501diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13502index 4a6aeed..371de20 100644
13503--- a/arch/x86/kernel/asm-offsets_64.c
13504+++ b/arch/x86/kernel/asm-offsets_64.c
13505@@ -44,6 +44,8 @@ int main(void)
13506 ENTRY(addr_limit);
13507 ENTRY(preempt_count);
13508 ENTRY(status);
13509+ ENTRY(lowest_stack);
13510+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13511 #ifdef CONFIG_IA32_EMULATION
13512 ENTRY(sysenter_return);
13513 #endif
13514@@ -63,6 +65,18 @@ int main(void)
13515 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13516 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13517 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13518+
13519+#ifdef CONFIG_PAX_KERNEXEC
13520+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13521+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13522+#endif
13523+
13524+#ifdef CONFIG_PAX_MEMORY_UDEREF
13525+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13526+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13527+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13528+#endif
13529+
13530 #endif
13531
13532
13533@@ -115,6 +129,7 @@ int main(void)
13534 ENTRY(cr8);
13535 BLANK();
13536 #undef ENTRY
13537+ DEFINE(TSS_size, sizeof(struct tss_struct));
13538 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13539 BLANK();
13540 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13541@@ -130,6 +145,7 @@ int main(void)
13542
13543 BLANK();
13544 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13545+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13546 #ifdef CONFIG_XEN
13547 BLANK();
13548 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13549diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13550index ff502cc..dc5133e 100644
13551--- a/arch/x86/kernel/cpu/Makefile
13552+++ b/arch/x86/kernel/cpu/Makefile
13553@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13554 CFLAGS_REMOVE_common.o = -pg
13555 endif
13556
13557-# Make sure load_percpu_segment has no stackprotector
13558-nostackp := $(call cc-option, -fno-stack-protector)
13559-CFLAGS_common.o := $(nostackp)
13560-
13561 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13562 obj-y += proc.o capflags.o powerflags.o common.o
13563 obj-y += vmware.o hypervisor.o sched.o
13564diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13565index 6e082dc..a0b5f36 100644
13566--- a/arch/x86/kernel/cpu/amd.c
13567+++ b/arch/x86/kernel/cpu/amd.c
13568@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13569 unsigned int size)
13570 {
13571 /* AMD errata T13 (order #21922) */
13572- if ((c->x86 == 6)) {
13573+ if (c->x86 == 6) {
13574 /* Duron Rev A0 */
13575 if (c->x86_model == 3 && c->x86_mask == 0)
13576 size = 64;
13577diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13578index 4e34d10..a53b130a 100644
13579--- a/arch/x86/kernel/cpu/common.c
13580+++ b/arch/x86/kernel/cpu/common.c
13581@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13582
13583 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13584
13585-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13586-#ifdef CONFIG_X86_64
13587- /*
13588- * We need valid kernel segments for data and code in long mode too
13589- * IRET will check the segment types kkeil 2000/10/28
13590- * Also sysret mandates a special GDT layout
13591- *
13592- * TLS descriptors are currently at a different place compared to i386.
13593- * Hopefully nobody expects them at a fixed place (Wine?)
13594- */
13595- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13596- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13597- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13598- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13599- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13600- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13601-#else
13602- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13603- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13604- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13605- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13606- /*
13607- * Segments used for calling PnP BIOS have byte granularity.
13608- * They code segments and data segments have fixed 64k limits,
13609- * the transfer segment sizes are set at run time.
13610- */
13611- /* 32-bit code */
13612- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13613- /* 16-bit code */
13614- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13615- /* 16-bit data */
13616- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13617- /* 16-bit data */
13618- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13619- /* 16-bit data */
13620- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13621- /*
13622- * The APM segments have byte granularity and their bases
13623- * are set at run time. All have 64k limits.
13624- */
13625- /* 32-bit code */
13626- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13627- /* 16-bit code */
13628- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13629- /* data */
13630- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13631-
13632- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13633- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13634- GDT_STACK_CANARY_INIT
13635-#endif
13636-} };
13637-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13638-
13639 static int __init x86_xsave_setup(char *s)
13640 {
13641 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13642@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13643 {
13644 struct desc_ptr gdt_descr;
13645
13646- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13647+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13648 gdt_descr.size = GDT_SIZE - 1;
13649 load_gdt(&gdt_descr);
13650 /* Reload the per-cpu base */
13651@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13652 /* Filter out anything that depends on CPUID levels we don't have */
13653 filter_cpuid_features(c, true);
13654
13655+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
13656+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13657+#endif
13658+
13659 /* If the model name is still unset, do table lookup. */
13660 if (!c->x86_model_id[0]) {
13661 const char *p;
13662@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13663 }
13664 __setup("clearcpuid=", setup_disablecpuid);
13665
13666+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13667+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13668+
13669 #ifdef CONFIG_X86_64
13670 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13671
13672@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13673 EXPORT_PER_CPU_SYMBOL(current_task);
13674
13675 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13676- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13677+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13678 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13679
13680 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13681@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13682 {
13683 memset(regs, 0, sizeof(struct pt_regs));
13684 regs->fs = __KERNEL_PERCPU;
13685- regs->gs = __KERNEL_STACK_CANARY;
13686+ savesegment(gs, regs->gs);
13687
13688 return regs;
13689 }
13690@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13691 int i;
13692
13693 cpu = stack_smp_processor_id();
13694- t = &per_cpu(init_tss, cpu);
13695+ t = init_tss + cpu;
13696 orig_ist = &per_cpu(orig_ist, cpu);
13697
13698 #ifdef CONFIG_NUMA
13699@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13700 switch_to_new_gdt(cpu);
13701 loadsegment(fs, 0);
13702
13703- load_idt((const struct desc_ptr *)&idt_descr);
13704+ load_idt(&idt_descr);
13705
13706 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13707 syscall_init();
13708@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13709 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13710 barrier();
13711
13712- check_efer();
13713 if (cpu != 0)
13714 enable_x2apic();
13715
13716@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13717 {
13718 int cpu = smp_processor_id();
13719 struct task_struct *curr = current;
13720- struct tss_struct *t = &per_cpu(init_tss, cpu);
13721+ struct tss_struct *t = init_tss + cpu;
13722 struct thread_struct *thread = &curr->thread;
13723
13724 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13725diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13726index 6a77cca..4f4fca0 100644
13727--- a/arch/x86/kernel/cpu/intel.c
13728+++ b/arch/x86/kernel/cpu/intel.c
13729@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13730 * Update the IDT descriptor and reload the IDT so that
13731 * it uses the read-only mapped virtual address.
13732 */
13733- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13734+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13735 load_idt(&idt_descr);
13736 }
13737 #endif
13738diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13739index 417990f..96dc36b 100644
13740--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13741+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13742@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13743 return ret;
13744 }
13745
13746-static struct sysfs_ops sysfs_ops = {
13747+static const struct sysfs_ops sysfs_ops = {
13748 .show = show,
13749 .store = store,
13750 };
13751diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13752index 472763d..9831e11 100644
13753--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13754+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13755@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13756 static int inject_init(void)
13757 {
13758 printk(KERN_INFO "Machine check injector initialized\n");
13759- mce_chrdev_ops.write = mce_write;
13760+ pax_open_kernel();
13761+ *(void **)&mce_chrdev_ops.write = mce_write;
13762+ pax_close_kernel();
13763 register_die_notifier(&mce_raise_nb);
13764 return 0;
13765 }
13766diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13767index 0f16a2b..21740f5 100644
13768--- a/arch/x86/kernel/cpu/mcheck/mce.c
13769+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13770@@ -43,6 +43,7 @@
13771 #include <asm/ipi.h>
13772 #include <asm/mce.h>
13773 #include <asm/msr.h>
13774+#include <asm/local.h>
13775
13776 #include "mce-internal.h"
13777
13778@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13779 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13780 m->cs, m->ip);
13781
13782- if (m->cs == __KERNEL_CS)
13783+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13784 print_symbol("{%s}", m->ip);
13785 pr_cont("\n");
13786 }
13787@@ -221,10 +222,10 @@ static void print_mce_tail(void)
13788
13789 #define PANIC_TIMEOUT 5 /* 5 seconds */
13790
13791-static atomic_t mce_paniced;
13792+static atomic_unchecked_t mce_paniced;
13793
13794 static int fake_panic;
13795-static atomic_t mce_fake_paniced;
13796+static atomic_unchecked_t mce_fake_paniced;
13797
13798 /* Panic in progress. Enable interrupts and wait for final IPI */
13799 static void wait_for_panic(void)
13800@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13801 /*
13802 * Make sure only one CPU runs in machine check panic
13803 */
13804- if (atomic_inc_return(&mce_paniced) > 1)
13805+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13806 wait_for_panic();
13807 barrier();
13808
13809@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13810 console_verbose();
13811 } else {
13812 /* Don't log too much for fake panic */
13813- if (atomic_inc_return(&mce_fake_paniced) > 1)
13814+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13815 return;
13816 }
13817 print_mce_head();
13818@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13819 * might have been modified by someone else.
13820 */
13821 rmb();
13822- if (atomic_read(&mce_paniced))
13823+ if (atomic_read_unchecked(&mce_paniced))
13824 wait_for_panic();
13825 if (!monarch_timeout)
13826 goto out;
13827@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13828 }
13829
13830 /* Call the installed machine check handler for this CPU setup. */
13831-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13832+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13833 unexpected_machine_check;
13834
13835 /*
13836@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13837 return;
13838 }
13839
13840+ pax_open_kernel();
13841 machine_check_vector = do_machine_check;
13842+ pax_close_kernel();
13843
13844 mce_init();
13845 mce_cpu_features(c);
13846@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13847 */
13848
13849 static DEFINE_SPINLOCK(mce_state_lock);
13850-static int open_count; /* #times opened */
13851+static local_t open_count; /* #times opened */
13852 static int open_exclu; /* already open exclusive? */
13853
13854 static int mce_open(struct inode *inode, struct file *file)
13855 {
13856 spin_lock(&mce_state_lock);
13857
13858- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13859+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13860 spin_unlock(&mce_state_lock);
13861
13862 return -EBUSY;
13863@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13864
13865 if (file->f_flags & O_EXCL)
13866 open_exclu = 1;
13867- open_count++;
13868+ local_inc(&open_count);
13869
13870 spin_unlock(&mce_state_lock);
13871
13872@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13873 {
13874 spin_lock(&mce_state_lock);
13875
13876- open_count--;
13877+ local_dec(&open_count);
13878 open_exclu = 0;
13879
13880 spin_unlock(&mce_state_lock);
13881@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13882 static void mce_reset(void)
13883 {
13884 cpu_missing = 0;
13885- atomic_set(&mce_fake_paniced, 0);
13886+ atomic_set_unchecked(&mce_fake_paniced, 0);
13887 atomic_set(&mce_executing, 0);
13888 atomic_set(&mce_callin, 0);
13889 atomic_set(&global_nwo, 0);
13890diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13891index ef3cd31..9d2f6ab 100644
13892--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13893+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13894@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13895 return ret;
13896 }
13897
13898-static struct sysfs_ops threshold_ops = {
13899+static const struct sysfs_ops threshold_ops = {
13900 .show = show,
13901 .store = store,
13902 };
13903diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13904index 5c0e653..1e82c7c 100644
13905--- a/arch/x86/kernel/cpu/mcheck/p5.c
13906+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13907@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13908 if (!cpu_has(c, X86_FEATURE_MCE))
13909 return;
13910
13911+ pax_open_kernel();
13912 machine_check_vector = pentium_machine_check;
13913+ pax_close_kernel();
13914 /* Make sure the vector pointer is visible before we enable MCEs: */
13915 wmb();
13916
13917diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13918index 54060f5..e6ba93d 100644
13919--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13920+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13921@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13922 {
13923 u32 lo, hi;
13924
13925+ pax_open_kernel();
13926 machine_check_vector = winchip_machine_check;
13927+ pax_close_kernel();
13928 /* Make sure the vector pointer is visible before we enable MCEs: */
13929 wmb();
13930
13931diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13932index 33af141..92ba9cd 100644
13933--- a/arch/x86/kernel/cpu/mtrr/amd.c
13934+++ b/arch/x86/kernel/cpu/mtrr/amd.c
13935@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
13936 return 0;
13937 }
13938
13939-static struct mtrr_ops amd_mtrr_ops = {
13940+static const struct mtrr_ops amd_mtrr_ops = {
13941 .vendor = X86_VENDOR_AMD,
13942 .set = amd_set_mtrr,
13943 .get = amd_get_mtrr,
13944diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
13945index de89f14..316fe3e 100644
13946--- a/arch/x86/kernel/cpu/mtrr/centaur.c
13947+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
13948@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
13949 return 0;
13950 }
13951
13952-static struct mtrr_ops centaur_mtrr_ops = {
13953+static const struct mtrr_ops centaur_mtrr_ops = {
13954 .vendor = X86_VENDOR_CENTAUR,
13955 .set = centaur_set_mcr,
13956 .get = centaur_get_mcr,
13957diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
13958index 228d982..68a3343 100644
13959--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
13960+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
13961@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
13962 post_set();
13963 }
13964
13965-static struct mtrr_ops cyrix_mtrr_ops = {
13966+static const struct mtrr_ops cyrix_mtrr_ops = {
13967 .vendor = X86_VENDOR_CYRIX,
13968 .set_all = cyrix_set_all,
13969 .set = cyrix_set_arr,
13970diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
13971index 55da0c5..4d75584 100644
13972--- a/arch/x86/kernel/cpu/mtrr/generic.c
13973+++ b/arch/x86/kernel/cpu/mtrr/generic.c
13974@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
13975 /*
13976 * Generic structure...
13977 */
13978-struct mtrr_ops generic_mtrr_ops = {
13979+const struct mtrr_ops generic_mtrr_ops = {
13980 .use_intel_if = 1,
13981 .set_all = generic_set_all,
13982 .get = generic_get_mtrr,
13983diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13984index fd60f09..c94ef52 100644
13985--- a/arch/x86/kernel/cpu/mtrr/main.c
13986+++ b/arch/x86/kernel/cpu/mtrr/main.c
13987@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
13988 u64 size_or_mask, size_and_mask;
13989 static bool mtrr_aps_delayed_init;
13990
13991-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13992+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13993
13994-struct mtrr_ops *mtrr_if;
13995+const struct mtrr_ops *mtrr_if;
13996
13997 static void set_mtrr(unsigned int reg, unsigned long base,
13998 unsigned long size, mtrr_type type);
13999
14000-void set_mtrr_ops(struct mtrr_ops *ops)
14001+void set_mtrr_ops(const struct mtrr_ops *ops)
14002 {
14003 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14004 mtrr_ops[ops->vendor] = ops;
14005diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14006index a501dee..816c719 100644
14007--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14008+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14009@@ -25,14 +25,14 @@ struct mtrr_ops {
14010 int (*validate_add_page)(unsigned long base, unsigned long size,
14011 unsigned int type);
14012 int (*have_wrcomb)(void);
14013-};
14014+} __do_const;
14015
14016 extern int generic_get_free_region(unsigned long base, unsigned long size,
14017 int replace_reg);
14018 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14019 unsigned int type);
14020
14021-extern struct mtrr_ops generic_mtrr_ops;
14022+extern const struct mtrr_ops generic_mtrr_ops;
14023
14024 extern int positive_have_wrcomb(void);
14025
14026@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14027 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14028 void get_mtrr_state(void);
14029
14030-extern void set_mtrr_ops(struct mtrr_ops *ops);
14031+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14032
14033 extern u64 size_or_mask, size_and_mask;
14034-extern struct mtrr_ops *mtrr_if;
14035+extern const struct mtrr_ops *mtrr_if;
14036
14037 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14038 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14039diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14040index 0ff02ca..fc49a60 100644
14041--- a/arch/x86/kernel/cpu/perf_event.c
14042+++ b/arch/x86/kernel/cpu/perf_event.c
14043@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14044 * count to the generic event atomically:
14045 */
14046 again:
14047- prev_raw_count = atomic64_read(&hwc->prev_count);
14048+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14049 rdmsrl(hwc->event_base + idx, new_raw_count);
14050
14051- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14052+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14053 new_raw_count) != prev_raw_count)
14054 goto again;
14055
14056@@ -741,7 +741,7 @@ again:
14057 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14058 delta >>= shift;
14059
14060- atomic64_add(delta, &event->count);
14061+ atomic64_add_unchecked(delta, &event->count);
14062 atomic64_sub(delta, &hwc->period_left);
14063
14064 return new_raw_count;
14065@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14066 * The hw event starts counting from this event offset,
14067 * mark it to be able to extra future deltas:
14068 */
14069- atomic64_set(&hwc->prev_count, (u64)-left);
14070+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14071
14072 err = checking_wrmsrl(hwc->event_base + idx,
14073 (u64)(-left) & x86_pmu.event_mask);
14074@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14075 break;
14076
14077 callchain_store(entry, frame.return_address);
14078- fp = frame.next_frame;
14079+ fp = (__force const void __user *)frame.next_frame;
14080 }
14081 }
14082
14083diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14084index 898df97..9e82503 100644
14085--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14086+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14087@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14088
14089 /* Interface defining a CPU specific perfctr watchdog */
14090 struct wd_ops {
14091- int (*reserve)(void);
14092- void (*unreserve)(void);
14093- int (*setup)(unsigned nmi_hz);
14094- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14095- void (*stop)(void);
14096+ int (* const reserve)(void);
14097+ void (* const unreserve)(void);
14098+ int (* const setup)(unsigned nmi_hz);
14099+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14100+ void (* const stop)(void);
14101 unsigned perfctr;
14102 unsigned evntsel;
14103 u64 checkbit;
14104@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14105 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14106 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14107
14108+/* cannot be const */
14109 static struct wd_ops intel_arch_wd_ops;
14110
14111 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14112@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14113 return 1;
14114 }
14115
14116+/* cannot be const */
14117 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14118 .reserve = single_msr_reserve,
14119 .unreserve = single_msr_unreserve,
14120diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14121index ff95824..2ffdcb5 100644
14122--- a/arch/x86/kernel/crash.c
14123+++ b/arch/x86/kernel/crash.c
14124@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14125 regs = args->regs;
14126
14127 #ifdef CONFIG_X86_32
14128- if (!user_mode_vm(regs)) {
14129+ if (!user_mode(regs)) {
14130 crash_fixup_ss_esp(&fixed_regs, regs);
14131 regs = &fixed_regs;
14132 }
14133diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14134index 37250fe..bf2ec74 100644
14135--- a/arch/x86/kernel/doublefault_32.c
14136+++ b/arch/x86/kernel/doublefault_32.c
14137@@ -11,7 +11,7 @@
14138
14139 #define DOUBLEFAULT_STACKSIZE (1024)
14140 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14141-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14142+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14143
14144 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14145
14146@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14147 unsigned long gdt, tss;
14148
14149 store_gdt(&gdt_desc);
14150- gdt = gdt_desc.address;
14151+ gdt = (unsigned long)gdt_desc.address;
14152
14153 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14154
14155@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14156 /* 0x2 bit is always set */
14157 .flags = X86_EFLAGS_SF | 0x2,
14158 .sp = STACK_START,
14159- .es = __USER_DS,
14160+ .es = __KERNEL_DS,
14161 .cs = __KERNEL_CS,
14162 .ss = __KERNEL_DS,
14163- .ds = __USER_DS,
14164+ .ds = __KERNEL_DS,
14165 .fs = __KERNEL_PERCPU,
14166
14167 .__cr3 = __pa_nodebug(swapper_pg_dir),
14168diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14169index 2d8a371..4fa6ae6 100644
14170--- a/arch/x86/kernel/dumpstack.c
14171+++ b/arch/x86/kernel/dumpstack.c
14172@@ -2,6 +2,9 @@
14173 * Copyright (C) 1991, 1992 Linus Torvalds
14174 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14175 */
14176+#ifdef CONFIG_GRKERNSEC_HIDESYM
14177+#define __INCLUDED_BY_HIDESYM 1
14178+#endif
14179 #include <linux/kallsyms.h>
14180 #include <linux/kprobes.h>
14181 #include <linux/uaccess.h>
14182@@ -28,7 +31,7 @@ static int die_counter;
14183
14184 void printk_address(unsigned long address, int reliable)
14185 {
14186- printk(" [<%p>] %s%pS\n", (void *) address,
14187+ printk(" [<%p>] %s%pA\n", (void *) address,
14188 reliable ? "" : "? ", (void *) address);
14189 }
14190
14191@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14192 static void
14193 print_ftrace_graph_addr(unsigned long addr, void *data,
14194 const struct stacktrace_ops *ops,
14195- struct thread_info *tinfo, int *graph)
14196+ struct task_struct *task, int *graph)
14197 {
14198- struct task_struct *task = tinfo->task;
14199 unsigned long ret_addr;
14200 int index = task->curr_ret_stack;
14201
14202@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14203 static inline void
14204 print_ftrace_graph_addr(unsigned long addr, void *data,
14205 const struct stacktrace_ops *ops,
14206- struct thread_info *tinfo, int *graph)
14207+ struct task_struct *task, int *graph)
14208 { }
14209 #endif
14210
14211@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14212 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14213 */
14214
14215-static inline int valid_stack_ptr(struct thread_info *tinfo,
14216- void *p, unsigned int size, void *end)
14217+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14218 {
14219- void *t = tinfo;
14220 if (end) {
14221 if (p < end && p >= (end-THREAD_SIZE))
14222 return 1;
14223@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14224 }
14225
14226 unsigned long
14227-print_context_stack(struct thread_info *tinfo,
14228+print_context_stack(struct task_struct *task, void *stack_start,
14229 unsigned long *stack, unsigned long bp,
14230 const struct stacktrace_ops *ops, void *data,
14231 unsigned long *end, int *graph)
14232 {
14233 struct stack_frame *frame = (struct stack_frame *)bp;
14234
14235- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14236+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14237 unsigned long addr;
14238
14239 addr = *stack;
14240@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14241 } else {
14242 ops->address(data, addr, 0);
14243 }
14244- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14245+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14246 }
14247 stack++;
14248 }
14249@@ -180,7 +180,7 @@ void dump_stack(void)
14250 #endif
14251
14252 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14253- current->pid, current->comm, print_tainted(),
14254+ task_pid_nr(current), current->comm, print_tainted(),
14255 init_utsname()->release,
14256 (int)strcspn(init_utsname()->version, " "),
14257 init_utsname()->version);
14258@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14259 return flags;
14260 }
14261
14262+extern void gr_handle_kernel_exploit(void);
14263+
14264 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14265 {
14266 if (regs && kexec_should_crash(current))
14267@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14268 panic("Fatal exception in interrupt");
14269 if (panic_on_oops)
14270 panic("Fatal exception");
14271- do_exit(signr);
14272+
14273+ gr_handle_kernel_exploit();
14274+
14275+ do_group_exit(signr);
14276 }
14277
14278 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14279@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14280 unsigned long flags = oops_begin();
14281 int sig = SIGSEGV;
14282
14283- if (!user_mode_vm(regs))
14284+ if (!user_mode(regs))
14285 report_bug(regs->ip, regs);
14286
14287 if (__die(str, regs, err))
14288diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14289index 81086c2..13e8b17 100644
14290--- a/arch/x86/kernel/dumpstack.h
14291+++ b/arch/x86/kernel/dumpstack.h
14292@@ -15,7 +15,7 @@
14293 #endif
14294
14295 extern unsigned long
14296-print_context_stack(struct thread_info *tinfo,
14297+print_context_stack(struct task_struct *task, void *stack_start,
14298 unsigned long *stack, unsigned long bp,
14299 const struct stacktrace_ops *ops, void *data,
14300 unsigned long *end, int *graph);
14301diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14302index f7dd2a7..504f53b 100644
14303--- a/arch/x86/kernel/dumpstack_32.c
14304+++ b/arch/x86/kernel/dumpstack_32.c
14305@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14306 #endif
14307
14308 for (;;) {
14309- struct thread_info *context;
14310+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14311+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14312
14313- context = (struct thread_info *)
14314- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14315- bp = print_context_stack(context, stack, bp, ops,
14316- data, NULL, &graph);
14317-
14318- stack = (unsigned long *)context->previous_esp;
14319- if (!stack)
14320+ if (stack_start == task_stack_page(task))
14321 break;
14322+ stack = *(unsigned long **)stack_start;
14323 if (ops->stack(data, "IRQ") < 0)
14324 break;
14325 touch_nmi_watchdog();
14326@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14327 * When in-kernel, we also print out the stack and code at the
14328 * time of the fault..
14329 */
14330- if (!user_mode_vm(regs)) {
14331+ if (!user_mode(regs)) {
14332 unsigned int code_prologue = code_bytes * 43 / 64;
14333 unsigned int code_len = code_bytes;
14334 unsigned char c;
14335 u8 *ip;
14336+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14337
14338 printk(KERN_EMERG "Stack:\n");
14339 show_stack_log_lvl(NULL, regs, &regs->sp,
14340@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14341
14342 printk(KERN_EMERG "Code: ");
14343
14344- ip = (u8 *)regs->ip - code_prologue;
14345+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14346 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14347 /* try starting at IP */
14348- ip = (u8 *)regs->ip;
14349+ ip = (u8 *)regs->ip + cs_base;
14350 code_len = code_len - code_prologue + 1;
14351 }
14352 for (i = 0; i < code_len; i++, ip++) {
14353@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14354 printk(" Bad EIP value.");
14355 break;
14356 }
14357- if (ip == (u8 *)regs->ip)
14358+ if (ip == (u8 *)regs->ip + cs_base)
14359 printk("<%02x> ", c);
14360 else
14361 printk("%02x ", c);
14362@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14363 printk("\n");
14364 }
14365
14366+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14367+void pax_check_alloca(unsigned long size)
14368+{
14369+ unsigned long sp = (unsigned long)&sp, stack_left;
14370+
14371+ /* all kernel stacks are of the same size */
14372+ stack_left = sp & (THREAD_SIZE - 1);
14373+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14374+}
14375+EXPORT_SYMBOL(pax_check_alloca);
14376+#endif
14377+
14378 int is_valid_bugaddr(unsigned long ip)
14379 {
14380 unsigned short ud2;
14381
14382+ ip = ktla_ktva(ip);
14383 if (ip < PAGE_OFFSET)
14384 return 0;
14385 if (probe_kernel_address((unsigned short *)ip, ud2))
14386diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14387index a071e6b..36cd585 100644
14388--- a/arch/x86/kernel/dumpstack_64.c
14389+++ b/arch/x86/kernel/dumpstack_64.c
14390@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14391 unsigned long *irq_stack_end =
14392 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14393 unsigned used = 0;
14394- struct thread_info *tinfo;
14395 int graph = 0;
14396+ void *stack_start;
14397
14398 if (!task)
14399 task = current;
14400@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14401 * current stack address. If the stacks consist of nested
14402 * exceptions
14403 */
14404- tinfo = task_thread_info(task);
14405 for (;;) {
14406 char *id;
14407 unsigned long *estack_end;
14408+
14409 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14410 &used, &id);
14411
14412@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14413 if (ops->stack(data, id) < 0)
14414 break;
14415
14416- bp = print_context_stack(tinfo, stack, bp, ops,
14417+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14418 data, estack_end, &graph);
14419 ops->stack(data, "<EOE>");
14420 /*
14421@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14422 if (stack >= irq_stack && stack < irq_stack_end) {
14423 if (ops->stack(data, "IRQ") < 0)
14424 break;
14425- bp = print_context_stack(tinfo, stack, bp,
14426+ bp = print_context_stack(task, irq_stack, stack, bp,
14427 ops, data, irq_stack_end, &graph);
14428 /*
14429 * We link to the next stack (which would be
14430@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14431 /*
14432 * This handles the process stack:
14433 */
14434- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14435+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14436+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14437 put_cpu();
14438 }
14439 EXPORT_SYMBOL(dump_trace);
14440@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14441 return ud2 == 0x0b0f;
14442 }
14443
14444+
14445+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14446+void pax_check_alloca(unsigned long size)
14447+{
14448+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14449+ unsigned cpu, used;
14450+ char *id;
14451+
14452+ /* check the process stack first */
14453+ stack_start = (unsigned long)task_stack_page(current);
14454+ stack_end = stack_start + THREAD_SIZE;
14455+ if (likely(stack_start <= sp && sp < stack_end)) {
14456+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14457+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14458+ return;
14459+ }
14460+
14461+ cpu = get_cpu();
14462+
14463+ /* check the irq stacks */
14464+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14465+ stack_start = stack_end - IRQ_STACK_SIZE;
14466+ if (stack_start <= sp && sp < stack_end) {
14467+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14468+ put_cpu();
14469+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14470+ return;
14471+ }
14472+
14473+ /* check the exception stacks */
14474+ used = 0;
14475+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14476+ stack_start = stack_end - EXCEPTION_STKSZ;
14477+ if (stack_end && stack_start <= sp && sp < stack_end) {
14478+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14479+ put_cpu();
14480+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14481+ return;
14482+ }
14483+
14484+ put_cpu();
14485+
14486+ /* unknown stack */
14487+ BUG();
14488+}
14489+EXPORT_SYMBOL(pax_check_alloca);
14490+#endif
14491diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14492index a89739a..95e0c48 100644
14493--- a/arch/x86/kernel/e820.c
14494+++ b/arch/x86/kernel/e820.c
14495@@ -733,7 +733,7 @@ struct early_res {
14496 };
14497 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14498 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14499- {}
14500+ { 0, 0, {0}, 0 }
14501 };
14502
14503 static int __init find_overlapped_early(u64 start, u64 end)
14504diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14505index b9c830c..1e41a96 100644
14506--- a/arch/x86/kernel/early_printk.c
14507+++ b/arch/x86/kernel/early_printk.c
14508@@ -7,6 +7,7 @@
14509 #include <linux/pci_regs.h>
14510 #include <linux/pci_ids.h>
14511 #include <linux/errno.h>
14512+#include <linux/sched.h>
14513 #include <asm/io.h>
14514 #include <asm/processor.h>
14515 #include <asm/fcntl.h>
14516@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14517 int n;
14518 va_list ap;
14519
14520+ pax_track_stack();
14521+
14522 va_start(ap, fmt);
14523 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14524 early_console->write(early_console, buf, n);
14525diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14526index 5cab48e..b025f9b 100644
14527--- a/arch/x86/kernel/efi_32.c
14528+++ b/arch/x86/kernel/efi_32.c
14529@@ -38,70 +38,56 @@
14530 */
14531
14532 static unsigned long efi_rt_eflags;
14533-static pgd_t efi_bak_pg_dir_pointer[2];
14534+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14535
14536-void efi_call_phys_prelog(void)
14537+void __init efi_call_phys_prelog(void)
14538 {
14539- unsigned long cr4;
14540- unsigned long temp;
14541 struct desc_ptr gdt_descr;
14542
14543+#ifdef CONFIG_PAX_KERNEXEC
14544+ struct desc_struct d;
14545+#endif
14546+
14547 local_irq_save(efi_rt_eflags);
14548
14549- /*
14550- * If I don't have PAE, I should just duplicate two entries in page
14551- * directory. If I have PAE, I just need to duplicate one entry in
14552- * page directory.
14553- */
14554- cr4 = read_cr4_safe();
14555-
14556- if (cr4 & X86_CR4_PAE) {
14557- efi_bak_pg_dir_pointer[0].pgd =
14558- swapper_pg_dir[pgd_index(0)].pgd;
14559- swapper_pg_dir[0].pgd =
14560- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14561- } else {
14562- efi_bak_pg_dir_pointer[0].pgd =
14563- swapper_pg_dir[pgd_index(0)].pgd;
14564- efi_bak_pg_dir_pointer[1].pgd =
14565- swapper_pg_dir[pgd_index(0x400000)].pgd;
14566- swapper_pg_dir[pgd_index(0)].pgd =
14567- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14568- temp = PAGE_OFFSET + 0x400000;
14569- swapper_pg_dir[pgd_index(0x400000)].pgd =
14570- swapper_pg_dir[pgd_index(temp)].pgd;
14571- }
14572+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14573+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14574+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14575
14576 /*
14577 * After the lock is released, the original page table is restored.
14578 */
14579 __flush_tlb_all();
14580
14581+#ifdef CONFIG_PAX_KERNEXEC
14582+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14583+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14584+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14585+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14586+#endif
14587+
14588 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14589 gdt_descr.size = GDT_SIZE - 1;
14590 load_gdt(&gdt_descr);
14591 }
14592
14593-void efi_call_phys_epilog(void)
14594+void __init efi_call_phys_epilog(void)
14595 {
14596- unsigned long cr4;
14597 struct desc_ptr gdt_descr;
14598
14599+#ifdef CONFIG_PAX_KERNEXEC
14600+ struct desc_struct d;
14601+
14602+ memset(&d, 0, sizeof d);
14603+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14604+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14605+#endif
14606+
14607 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14608 gdt_descr.size = GDT_SIZE - 1;
14609 load_gdt(&gdt_descr);
14610
14611- cr4 = read_cr4_safe();
14612-
14613- if (cr4 & X86_CR4_PAE) {
14614- swapper_pg_dir[pgd_index(0)].pgd =
14615- efi_bak_pg_dir_pointer[0].pgd;
14616- } else {
14617- swapper_pg_dir[pgd_index(0)].pgd =
14618- efi_bak_pg_dir_pointer[0].pgd;
14619- swapper_pg_dir[pgd_index(0x400000)].pgd =
14620- efi_bak_pg_dir_pointer[1].pgd;
14621- }
14622+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14623
14624 /*
14625 * After the lock is released, the original page table is restored.
14626diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14627index fbe66e6..c5c0dd2 100644
14628--- a/arch/x86/kernel/efi_stub_32.S
14629+++ b/arch/x86/kernel/efi_stub_32.S
14630@@ -6,7 +6,9 @@
14631 */
14632
14633 #include <linux/linkage.h>
14634+#include <linux/init.h>
14635 #include <asm/page_types.h>
14636+#include <asm/segment.h>
14637
14638 /*
14639 * efi_call_phys(void *, ...) is a function with variable parameters.
14640@@ -20,7 +22,7 @@
14641 * service functions will comply with gcc calling convention, too.
14642 */
14643
14644-.text
14645+__INIT
14646 ENTRY(efi_call_phys)
14647 /*
14648 * 0. The function can only be called in Linux kernel. So CS has been
14649@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14650 * The mapping of lower virtual memory has been created in prelog and
14651 * epilog.
14652 */
14653- movl $1f, %edx
14654- subl $__PAGE_OFFSET, %edx
14655- jmp *%edx
14656+ movl $(__KERNEXEC_EFI_DS), %edx
14657+ mov %edx, %ds
14658+ mov %edx, %es
14659+ mov %edx, %ss
14660+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14661 1:
14662
14663 /*
14664@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14665 * parameter 2, ..., param n. To make things easy, we save the return
14666 * address of efi_call_phys in a global variable.
14667 */
14668- popl %edx
14669- movl %edx, saved_return_addr
14670- /* get the function pointer into ECX*/
14671- popl %ecx
14672- movl %ecx, efi_rt_function_ptr
14673- movl $2f, %edx
14674- subl $__PAGE_OFFSET, %edx
14675- pushl %edx
14676+ popl (saved_return_addr)
14677+ popl (efi_rt_function_ptr)
14678
14679 /*
14680 * 3. Clear PG bit in %CR0.
14681@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14682 /*
14683 * 5. Call the physical function.
14684 */
14685- jmp *%ecx
14686+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14687
14688-2:
14689 /*
14690 * 6. After EFI runtime service returns, control will return to
14691 * following instruction. We'd better readjust stack pointer first.
14692@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14693 movl %cr0, %edx
14694 orl $0x80000000, %edx
14695 movl %edx, %cr0
14696- jmp 1f
14697-1:
14698+
14699 /*
14700 * 8. Now restore the virtual mode from flat mode by
14701 * adding EIP with PAGE_OFFSET.
14702 */
14703- movl $1f, %edx
14704- jmp *%edx
14705+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14706 1:
14707+ movl $(__KERNEL_DS), %edx
14708+ mov %edx, %ds
14709+ mov %edx, %es
14710+ mov %edx, %ss
14711
14712 /*
14713 * 9. Balance the stack. And because EAX contain the return value,
14714 * we'd better not clobber it.
14715 */
14716- leal efi_rt_function_ptr, %edx
14717- movl (%edx), %ecx
14718- pushl %ecx
14719+ pushl (efi_rt_function_ptr)
14720
14721 /*
14722- * 10. Push the saved return address onto the stack and return.
14723+ * 10. Return to the saved return address.
14724 */
14725- leal saved_return_addr, %edx
14726- movl (%edx), %ecx
14727- pushl %ecx
14728- ret
14729+ jmpl *(saved_return_addr)
14730 ENDPROC(efi_call_phys)
14731 .previous
14732
14733-.data
14734+__INITDATA
14735 saved_return_addr:
14736 .long 0
14737 efi_rt_function_ptr:
14738diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14739index 4c07cca..2c8427d 100644
14740--- a/arch/x86/kernel/efi_stub_64.S
14741+++ b/arch/x86/kernel/efi_stub_64.S
14742@@ -7,6 +7,7 @@
14743 */
14744
14745 #include <linux/linkage.h>
14746+#include <asm/alternative-asm.h>
14747
14748 #define SAVE_XMM \
14749 mov %rsp, %rax; \
14750@@ -40,6 +41,7 @@ ENTRY(efi_call0)
14751 call *%rdi
14752 addq $32, %rsp
14753 RESTORE_XMM
14754+ pax_force_retaddr 0, 1
14755 ret
14756 ENDPROC(efi_call0)
14757
14758@@ -50,6 +52,7 @@ ENTRY(efi_call1)
14759 call *%rdi
14760 addq $32, %rsp
14761 RESTORE_XMM
14762+ pax_force_retaddr 0, 1
14763 ret
14764 ENDPROC(efi_call1)
14765
14766@@ -60,6 +63,7 @@ ENTRY(efi_call2)
14767 call *%rdi
14768 addq $32, %rsp
14769 RESTORE_XMM
14770+ pax_force_retaddr 0, 1
14771 ret
14772 ENDPROC(efi_call2)
14773
14774@@ -71,6 +75,7 @@ ENTRY(efi_call3)
14775 call *%rdi
14776 addq $32, %rsp
14777 RESTORE_XMM
14778+ pax_force_retaddr 0, 1
14779 ret
14780 ENDPROC(efi_call3)
14781
14782@@ -83,6 +88,7 @@ ENTRY(efi_call4)
14783 call *%rdi
14784 addq $32, %rsp
14785 RESTORE_XMM
14786+ pax_force_retaddr 0, 1
14787 ret
14788 ENDPROC(efi_call4)
14789
14790@@ -96,6 +102,7 @@ ENTRY(efi_call5)
14791 call *%rdi
14792 addq $48, %rsp
14793 RESTORE_XMM
14794+ pax_force_retaddr 0, 1
14795 ret
14796 ENDPROC(efi_call5)
14797
14798@@ -112,5 +119,6 @@ ENTRY(efi_call6)
14799 call *%rdi
14800 addq $48, %rsp
14801 RESTORE_XMM
14802+ pax_force_retaddr 0, 1
14803 ret
14804 ENDPROC(efi_call6)
14805diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14806index c097e7d..c689cf4 100644
14807--- a/arch/x86/kernel/entry_32.S
14808+++ b/arch/x86/kernel/entry_32.S
14809@@ -185,13 +185,146 @@
14810 /*CFI_REL_OFFSET gs, PT_GS*/
14811 .endm
14812 .macro SET_KERNEL_GS reg
14813+
14814+#ifdef CONFIG_CC_STACKPROTECTOR
14815 movl $(__KERNEL_STACK_CANARY), \reg
14816+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14817+ movl $(__USER_DS), \reg
14818+#else
14819+ xorl \reg, \reg
14820+#endif
14821+
14822 movl \reg, %gs
14823 .endm
14824
14825 #endif /* CONFIG_X86_32_LAZY_GS */
14826
14827-.macro SAVE_ALL
14828+.macro pax_enter_kernel
14829+#ifdef CONFIG_PAX_KERNEXEC
14830+ call pax_enter_kernel
14831+#endif
14832+.endm
14833+
14834+.macro pax_exit_kernel
14835+#ifdef CONFIG_PAX_KERNEXEC
14836+ call pax_exit_kernel
14837+#endif
14838+.endm
14839+
14840+#ifdef CONFIG_PAX_KERNEXEC
14841+ENTRY(pax_enter_kernel)
14842+#ifdef CONFIG_PARAVIRT
14843+ pushl %eax
14844+ pushl %ecx
14845+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14846+ mov %eax, %esi
14847+#else
14848+ mov %cr0, %esi
14849+#endif
14850+ bts $16, %esi
14851+ jnc 1f
14852+ mov %cs, %esi
14853+ cmp $__KERNEL_CS, %esi
14854+ jz 3f
14855+ ljmp $__KERNEL_CS, $3f
14856+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14857+2:
14858+#ifdef CONFIG_PARAVIRT
14859+ mov %esi, %eax
14860+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14861+#else
14862+ mov %esi, %cr0
14863+#endif
14864+3:
14865+#ifdef CONFIG_PARAVIRT
14866+ popl %ecx
14867+ popl %eax
14868+#endif
14869+ ret
14870+ENDPROC(pax_enter_kernel)
14871+
14872+ENTRY(pax_exit_kernel)
14873+#ifdef CONFIG_PARAVIRT
14874+ pushl %eax
14875+ pushl %ecx
14876+#endif
14877+ mov %cs, %esi
14878+ cmp $__KERNEXEC_KERNEL_CS, %esi
14879+ jnz 2f
14880+#ifdef CONFIG_PARAVIRT
14881+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14882+ mov %eax, %esi
14883+#else
14884+ mov %cr0, %esi
14885+#endif
14886+ btr $16, %esi
14887+ ljmp $__KERNEL_CS, $1f
14888+1:
14889+#ifdef CONFIG_PARAVIRT
14890+ mov %esi, %eax
14891+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14892+#else
14893+ mov %esi, %cr0
14894+#endif
14895+2:
14896+#ifdef CONFIG_PARAVIRT
14897+ popl %ecx
14898+ popl %eax
14899+#endif
14900+ ret
14901+ENDPROC(pax_exit_kernel)
14902+#endif
14903+
14904+.macro pax_erase_kstack
14905+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14906+ call pax_erase_kstack
14907+#endif
14908+.endm
14909+
14910+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14911+/*
14912+ * ebp: thread_info
14913+ * ecx, edx: can be clobbered
14914+ */
14915+ENTRY(pax_erase_kstack)
14916+ pushl %edi
14917+ pushl %eax
14918+
14919+ mov TI_lowest_stack(%ebp), %edi
14920+ mov $-0xBEEF, %eax
14921+ std
14922+
14923+1: mov %edi, %ecx
14924+ and $THREAD_SIZE_asm - 1, %ecx
14925+ shr $2, %ecx
14926+ repne scasl
14927+ jecxz 2f
14928+
14929+ cmp $2*16, %ecx
14930+ jc 2f
14931+
14932+ mov $2*16, %ecx
14933+ repe scasl
14934+ jecxz 2f
14935+ jne 1b
14936+
14937+2: cld
14938+ mov %esp, %ecx
14939+ sub %edi, %ecx
14940+ shr $2, %ecx
14941+ rep stosl
14942+
14943+ mov TI_task_thread_sp0(%ebp), %edi
14944+ sub $128, %edi
14945+ mov %edi, TI_lowest_stack(%ebp)
14946+
14947+ popl %eax
14948+ popl %edi
14949+ ret
14950+ENDPROC(pax_erase_kstack)
14951+#endif
14952+
14953+.macro __SAVE_ALL _DS
14954 cld
14955 PUSH_GS
14956 pushl %fs
14957@@ -224,7 +357,7 @@
14958 pushl %ebx
14959 CFI_ADJUST_CFA_OFFSET 4
14960 CFI_REL_OFFSET ebx, 0
14961- movl $(__USER_DS), %edx
14962+ movl $\_DS, %edx
14963 movl %edx, %ds
14964 movl %edx, %es
14965 movl $(__KERNEL_PERCPU), %edx
14966@@ -232,6 +365,15 @@
14967 SET_KERNEL_GS %edx
14968 .endm
14969
14970+.macro SAVE_ALL
14971+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14972+ __SAVE_ALL __KERNEL_DS
14973+ pax_enter_kernel
14974+#else
14975+ __SAVE_ALL __USER_DS
14976+#endif
14977+.endm
14978+
14979 .macro RESTORE_INT_REGS
14980 popl %ebx
14981 CFI_ADJUST_CFA_OFFSET -4
14982@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
14983 CFI_ADJUST_CFA_OFFSET -4
14984 jmp syscall_exit
14985 CFI_ENDPROC
14986-END(ret_from_fork)
14987+ENDPROC(ret_from_fork)
14988
14989 /*
14990 * Return to user mode is not as complex as all this looks,
14991@@ -352,7 +494,15 @@ check_userspace:
14992 movb PT_CS(%esp), %al
14993 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
14994 cmpl $USER_RPL, %eax
14995+
14996+#ifdef CONFIG_PAX_KERNEXEC
14997+ jae resume_userspace
14998+
14999+ PAX_EXIT_KERNEL
15000+ jmp resume_kernel
15001+#else
15002 jb resume_kernel # not returning to v8086 or userspace
15003+#endif
15004
15005 ENTRY(resume_userspace)
15006 LOCKDEP_SYS_EXIT
15007@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15008 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15009 # int/exception return?
15010 jne work_pending
15011- jmp restore_all
15012-END(ret_from_exception)
15013+ jmp restore_all_pax
15014+ENDPROC(ret_from_exception)
15015
15016 #ifdef CONFIG_PREEMPT
15017 ENTRY(resume_kernel)
15018@@ -380,7 +530,7 @@ need_resched:
15019 jz restore_all
15020 call preempt_schedule_irq
15021 jmp need_resched
15022-END(resume_kernel)
15023+ENDPROC(resume_kernel)
15024 #endif
15025 CFI_ENDPROC
15026
15027@@ -414,25 +564,36 @@ sysenter_past_esp:
15028 /*CFI_REL_OFFSET cs, 0*/
15029 /*
15030 * Push current_thread_info()->sysenter_return to the stack.
15031- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15032- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15033 */
15034- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15035+ pushl $0
15036 CFI_ADJUST_CFA_OFFSET 4
15037 CFI_REL_OFFSET eip, 0
15038
15039 pushl %eax
15040 CFI_ADJUST_CFA_OFFSET 4
15041 SAVE_ALL
15042+ GET_THREAD_INFO(%ebp)
15043+ movl TI_sysenter_return(%ebp),%ebp
15044+ movl %ebp,PT_EIP(%esp)
15045 ENABLE_INTERRUPTS(CLBR_NONE)
15046
15047 /*
15048 * Load the potential sixth argument from user stack.
15049 * Careful about security.
15050 */
15051+ movl PT_OLDESP(%esp),%ebp
15052+
15053+#ifdef CONFIG_PAX_MEMORY_UDEREF
15054+ mov PT_OLDSS(%esp),%ds
15055+1: movl %ds:(%ebp),%ebp
15056+ push %ss
15057+ pop %ds
15058+#else
15059 cmpl $__PAGE_OFFSET-3,%ebp
15060 jae syscall_fault
15061 1: movl (%ebp),%ebp
15062+#endif
15063+
15064 movl %ebp,PT_EBP(%esp)
15065 .section __ex_table,"a"
15066 .align 4
15067@@ -455,12 +616,24 @@ sysenter_do_call:
15068 testl $_TIF_ALLWORK_MASK, %ecx
15069 jne sysexit_audit
15070 sysenter_exit:
15071+
15072+#ifdef CONFIG_PAX_RANDKSTACK
15073+ pushl_cfi %eax
15074+ movl %esp, %eax
15075+ call pax_randomize_kstack
15076+ popl_cfi %eax
15077+#endif
15078+
15079+ pax_erase_kstack
15080+
15081 /* if something modifies registers it must also disable sysexit */
15082 movl PT_EIP(%esp), %edx
15083 movl PT_OLDESP(%esp), %ecx
15084 xorl %ebp,%ebp
15085 TRACE_IRQS_ON
15086 1: mov PT_FS(%esp), %fs
15087+2: mov PT_DS(%esp), %ds
15088+3: mov PT_ES(%esp), %es
15089 PTGS_TO_GS
15090 ENABLE_INTERRUPTS_SYSEXIT
15091
15092@@ -477,6 +650,9 @@ sysenter_audit:
15093 movl %eax,%edx /* 2nd arg: syscall number */
15094 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15095 call audit_syscall_entry
15096+
15097+ pax_erase_kstack
15098+
15099 pushl %ebx
15100 CFI_ADJUST_CFA_OFFSET 4
15101 movl PT_EAX(%esp),%eax /* reload syscall number */
15102@@ -504,11 +680,17 @@ sysexit_audit:
15103
15104 CFI_ENDPROC
15105 .pushsection .fixup,"ax"
15106-2: movl $0,PT_FS(%esp)
15107+4: movl $0,PT_FS(%esp)
15108+ jmp 1b
15109+5: movl $0,PT_DS(%esp)
15110+ jmp 1b
15111+6: movl $0,PT_ES(%esp)
15112 jmp 1b
15113 .section __ex_table,"a"
15114 .align 4
15115- .long 1b,2b
15116+ .long 1b,4b
15117+ .long 2b,5b
15118+ .long 3b,6b
15119 .popsection
15120 PTGS_TO_GS_EX
15121 ENDPROC(ia32_sysenter_target)
15122@@ -538,6 +720,15 @@ syscall_exit:
15123 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15124 jne syscall_exit_work
15125
15126+restore_all_pax:
15127+
15128+#ifdef CONFIG_PAX_RANDKSTACK
15129+ movl %esp, %eax
15130+ call pax_randomize_kstack
15131+#endif
15132+
15133+ pax_erase_kstack
15134+
15135 restore_all:
15136 TRACE_IRQS_IRET
15137 restore_all_notrace:
15138@@ -602,10 +793,29 @@ ldt_ss:
15139 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15140 mov %dx, %ax /* eax: new kernel esp */
15141 sub %eax, %edx /* offset (low word is 0) */
15142- PER_CPU(gdt_page, %ebx)
15143+#ifdef CONFIG_SMP
15144+ movl PER_CPU_VAR(cpu_number), %ebx
15145+ shll $PAGE_SHIFT_asm, %ebx
15146+ addl $cpu_gdt_table, %ebx
15147+#else
15148+ movl $cpu_gdt_table, %ebx
15149+#endif
15150 shr $16, %edx
15151+
15152+#ifdef CONFIG_PAX_KERNEXEC
15153+ mov %cr0, %esi
15154+ btr $16, %esi
15155+ mov %esi, %cr0
15156+#endif
15157+
15158 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15159 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15160+
15161+#ifdef CONFIG_PAX_KERNEXEC
15162+ bts $16, %esi
15163+ mov %esi, %cr0
15164+#endif
15165+
15166 pushl $__ESPFIX_SS
15167 CFI_ADJUST_CFA_OFFSET 4
15168 push %eax /* new kernel esp */
15169@@ -636,36 +846,30 @@ work_resched:
15170 movl TI_flags(%ebp), %ecx
15171 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15172 # than syscall tracing?
15173- jz restore_all
15174+ jz restore_all_pax
15175 testb $_TIF_NEED_RESCHED, %cl
15176 jnz work_resched
15177
15178 work_notifysig: # deal with pending signals and
15179 # notify-resume requests
15180+ movl %esp, %eax
15181 #ifdef CONFIG_VM86
15182 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15183- movl %esp, %eax
15184- jne work_notifysig_v86 # returning to kernel-space or
15185+ jz 1f # returning to kernel-space or
15186 # vm86-space
15187- xorl %edx, %edx
15188- call do_notify_resume
15189- jmp resume_userspace_sig
15190
15191- ALIGN
15192-work_notifysig_v86:
15193 pushl %ecx # save ti_flags for do_notify_resume
15194 CFI_ADJUST_CFA_OFFSET 4
15195 call save_v86_state # %eax contains pt_regs pointer
15196 popl %ecx
15197 CFI_ADJUST_CFA_OFFSET -4
15198 movl %eax, %esp
15199-#else
15200- movl %esp, %eax
15201+1:
15202 #endif
15203 xorl %edx, %edx
15204 call do_notify_resume
15205 jmp resume_userspace_sig
15206-END(work_pending)
15207+ENDPROC(work_pending)
15208
15209 # perform syscall exit tracing
15210 ALIGN
15211@@ -673,11 +877,14 @@ syscall_trace_entry:
15212 movl $-ENOSYS,PT_EAX(%esp)
15213 movl %esp, %eax
15214 call syscall_trace_enter
15215+
15216+ pax_erase_kstack
15217+
15218 /* What it returned is what we'll actually use. */
15219 cmpl $(nr_syscalls), %eax
15220 jnae syscall_call
15221 jmp syscall_exit
15222-END(syscall_trace_entry)
15223+ENDPROC(syscall_trace_entry)
15224
15225 # perform syscall exit tracing
15226 ALIGN
15227@@ -690,20 +897,24 @@ syscall_exit_work:
15228 movl %esp, %eax
15229 call syscall_trace_leave
15230 jmp resume_userspace
15231-END(syscall_exit_work)
15232+ENDPROC(syscall_exit_work)
15233 CFI_ENDPROC
15234
15235 RING0_INT_FRAME # can't unwind into user space anyway
15236 syscall_fault:
15237+#ifdef CONFIG_PAX_MEMORY_UDEREF
15238+ push %ss
15239+ pop %ds
15240+#endif
15241 GET_THREAD_INFO(%ebp)
15242 movl $-EFAULT,PT_EAX(%esp)
15243 jmp resume_userspace
15244-END(syscall_fault)
15245+ENDPROC(syscall_fault)
15246
15247 syscall_badsys:
15248 movl $-ENOSYS,PT_EAX(%esp)
15249 jmp resume_userspace
15250-END(syscall_badsys)
15251+ENDPROC(syscall_badsys)
15252 CFI_ENDPROC
15253
15254 /*
15255@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15256 PTREGSCALL(vm86)
15257 PTREGSCALL(vm86old)
15258
15259+ ALIGN;
15260+ENTRY(kernel_execve)
15261+ push %ebp
15262+ sub $PT_OLDSS+4,%esp
15263+ push %edi
15264+ push %ecx
15265+ push %eax
15266+ lea 3*4(%esp),%edi
15267+ mov $PT_OLDSS/4+1,%ecx
15268+ xorl %eax,%eax
15269+ rep stosl
15270+ pop %eax
15271+ pop %ecx
15272+ pop %edi
15273+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15274+ mov %eax,PT_EBX(%esp)
15275+ mov %edx,PT_ECX(%esp)
15276+ mov %ecx,PT_EDX(%esp)
15277+ mov %esp,%eax
15278+ call sys_execve
15279+ GET_THREAD_INFO(%ebp)
15280+ test %eax,%eax
15281+ jz syscall_exit
15282+ add $PT_OLDSS+4,%esp
15283+ pop %ebp
15284+ ret
15285+
15286 .macro FIXUP_ESPFIX_STACK
15287 /*
15288 * Switch back for ESPFIX stack to the normal zerobased stack
15289@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15290 * normal stack and adjusts ESP with the matching offset.
15291 */
15292 /* fixup the stack */
15293- PER_CPU(gdt_page, %ebx)
15294+#ifdef CONFIG_SMP
15295+ movl PER_CPU_VAR(cpu_number), %ebx
15296+ shll $PAGE_SHIFT_asm, %ebx
15297+ addl $cpu_gdt_table, %ebx
15298+#else
15299+ movl $cpu_gdt_table, %ebx
15300+#endif
15301 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15302 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15303 shl $16, %eax
15304@@ -793,7 +1037,7 @@ vector=vector+1
15305 .endr
15306 2: jmp common_interrupt
15307 .endr
15308-END(irq_entries_start)
15309+ENDPROC(irq_entries_start)
15310
15311 .previous
15312 END(interrupt)
15313@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15314 CFI_ADJUST_CFA_OFFSET 4
15315 jmp error_code
15316 CFI_ENDPROC
15317-END(coprocessor_error)
15318+ENDPROC(coprocessor_error)
15319
15320 ENTRY(simd_coprocessor_error)
15321 RING0_INT_FRAME
15322@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15323 CFI_ADJUST_CFA_OFFSET 4
15324 jmp error_code
15325 CFI_ENDPROC
15326-END(simd_coprocessor_error)
15327+ENDPROC(simd_coprocessor_error)
15328
15329 ENTRY(device_not_available)
15330 RING0_INT_FRAME
15331@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15332 CFI_ADJUST_CFA_OFFSET 4
15333 jmp error_code
15334 CFI_ENDPROC
15335-END(device_not_available)
15336+ENDPROC(device_not_available)
15337
15338 #ifdef CONFIG_PARAVIRT
15339 ENTRY(native_iret)
15340@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15341 .align 4
15342 .long native_iret, iret_exc
15343 .previous
15344-END(native_iret)
15345+ENDPROC(native_iret)
15346
15347 ENTRY(native_irq_enable_sysexit)
15348 sti
15349 sysexit
15350-END(native_irq_enable_sysexit)
15351+ENDPROC(native_irq_enable_sysexit)
15352 #endif
15353
15354 ENTRY(overflow)
15355@@ -885,7 +1129,7 @@ ENTRY(overflow)
15356 CFI_ADJUST_CFA_OFFSET 4
15357 jmp error_code
15358 CFI_ENDPROC
15359-END(overflow)
15360+ENDPROC(overflow)
15361
15362 ENTRY(bounds)
15363 RING0_INT_FRAME
15364@@ -895,7 +1139,7 @@ ENTRY(bounds)
15365 CFI_ADJUST_CFA_OFFSET 4
15366 jmp error_code
15367 CFI_ENDPROC
15368-END(bounds)
15369+ENDPROC(bounds)
15370
15371 ENTRY(invalid_op)
15372 RING0_INT_FRAME
15373@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15374 CFI_ADJUST_CFA_OFFSET 4
15375 jmp error_code
15376 CFI_ENDPROC
15377-END(invalid_op)
15378+ENDPROC(invalid_op)
15379
15380 ENTRY(coprocessor_segment_overrun)
15381 RING0_INT_FRAME
15382@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15383 CFI_ADJUST_CFA_OFFSET 4
15384 jmp error_code
15385 CFI_ENDPROC
15386-END(coprocessor_segment_overrun)
15387+ENDPROC(coprocessor_segment_overrun)
15388
15389 ENTRY(invalid_TSS)
15390 RING0_EC_FRAME
15391@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15392 CFI_ADJUST_CFA_OFFSET 4
15393 jmp error_code
15394 CFI_ENDPROC
15395-END(invalid_TSS)
15396+ENDPROC(invalid_TSS)
15397
15398 ENTRY(segment_not_present)
15399 RING0_EC_FRAME
15400@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15401 CFI_ADJUST_CFA_OFFSET 4
15402 jmp error_code
15403 CFI_ENDPROC
15404-END(segment_not_present)
15405+ENDPROC(segment_not_present)
15406
15407 ENTRY(stack_segment)
15408 RING0_EC_FRAME
15409@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15410 CFI_ADJUST_CFA_OFFSET 4
15411 jmp error_code
15412 CFI_ENDPROC
15413-END(stack_segment)
15414+ENDPROC(stack_segment)
15415
15416 ENTRY(alignment_check)
15417 RING0_EC_FRAME
15418@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15419 CFI_ADJUST_CFA_OFFSET 4
15420 jmp error_code
15421 CFI_ENDPROC
15422-END(alignment_check)
15423+ENDPROC(alignment_check)
15424
15425 ENTRY(divide_error)
15426 RING0_INT_FRAME
15427@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15428 CFI_ADJUST_CFA_OFFSET 4
15429 jmp error_code
15430 CFI_ENDPROC
15431-END(divide_error)
15432+ENDPROC(divide_error)
15433
15434 #ifdef CONFIG_X86_MCE
15435 ENTRY(machine_check)
15436@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15437 CFI_ADJUST_CFA_OFFSET 4
15438 jmp error_code
15439 CFI_ENDPROC
15440-END(machine_check)
15441+ENDPROC(machine_check)
15442 #endif
15443
15444 ENTRY(spurious_interrupt_bug)
15445@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15446 CFI_ADJUST_CFA_OFFSET 4
15447 jmp error_code
15448 CFI_ENDPROC
15449-END(spurious_interrupt_bug)
15450+ENDPROC(spurious_interrupt_bug)
15451
15452 ENTRY(kernel_thread_helper)
15453 pushl $0 # fake return address for unwinder
15454@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15455
15456 ENTRY(mcount)
15457 ret
15458-END(mcount)
15459+ENDPROC(mcount)
15460
15461 ENTRY(ftrace_caller)
15462 cmpl $0, function_trace_stop
15463@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15464 .globl ftrace_stub
15465 ftrace_stub:
15466 ret
15467-END(ftrace_caller)
15468+ENDPROC(ftrace_caller)
15469
15470 #else /* ! CONFIG_DYNAMIC_FTRACE */
15471
15472@@ -1160,7 +1404,7 @@ trace:
15473 popl %ecx
15474 popl %eax
15475 jmp ftrace_stub
15476-END(mcount)
15477+ENDPROC(mcount)
15478 #endif /* CONFIG_DYNAMIC_FTRACE */
15479 #endif /* CONFIG_FUNCTION_TRACER */
15480
15481@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15482 popl %ecx
15483 popl %eax
15484 ret
15485-END(ftrace_graph_caller)
15486+ENDPROC(ftrace_graph_caller)
15487
15488 .globl return_to_handler
15489 return_to_handler:
15490@@ -1198,7 +1442,6 @@ return_to_handler:
15491 ret
15492 #endif
15493
15494-.section .rodata,"a"
15495 #include "syscall_table_32.S"
15496
15497 syscall_table_size=(.-sys_call_table)
15498@@ -1255,15 +1498,18 @@ error_code:
15499 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15500 REG_TO_PTGS %ecx
15501 SET_KERNEL_GS %ecx
15502- movl $(__USER_DS), %ecx
15503+ movl $(__KERNEL_DS), %ecx
15504 movl %ecx, %ds
15505 movl %ecx, %es
15506+
15507+ pax_enter_kernel
15508+
15509 TRACE_IRQS_OFF
15510 movl %esp,%eax # pt_regs pointer
15511 call *%edi
15512 jmp ret_from_exception
15513 CFI_ENDPROC
15514-END(page_fault)
15515+ENDPROC(page_fault)
15516
15517 /*
15518 * Debug traps and NMI can happen at the one SYSENTER instruction
15519@@ -1309,7 +1555,7 @@ debug_stack_correct:
15520 call do_debug
15521 jmp ret_from_exception
15522 CFI_ENDPROC
15523-END(debug)
15524+ENDPROC(debug)
15525
15526 /*
15527 * NMI is doubly nasty. It can happen _while_ we're handling
15528@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15529 xorl %edx,%edx # zero error code
15530 movl %esp,%eax # pt_regs pointer
15531 call do_nmi
15532+
15533+ pax_exit_kernel
15534+
15535 jmp restore_all_notrace
15536 CFI_ENDPROC
15537
15538@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15539 FIXUP_ESPFIX_STACK # %eax == %esp
15540 xorl %edx,%edx # zero error code
15541 call do_nmi
15542+
15543+ pax_exit_kernel
15544+
15545 RESTORE_REGS
15546 lss 12+4(%esp), %esp # back to espfix stack
15547 CFI_ADJUST_CFA_OFFSET -24
15548 jmp irq_return
15549 CFI_ENDPROC
15550-END(nmi)
15551+ENDPROC(nmi)
15552
15553 ENTRY(int3)
15554 RING0_INT_FRAME
15555@@ -1409,7 +1661,7 @@ ENTRY(int3)
15556 call do_int3
15557 jmp ret_from_exception
15558 CFI_ENDPROC
15559-END(int3)
15560+ENDPROC(int3)
15561
15562 ENTRY(general_protection)
15563 RING0_EC_FRAME
15564@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15565 CFI_ADJUST_CFA_OFFSET 4
15566 jmp error_code
15567 CFI_ENDPROC
15568-END(general_protection)
15569+ENDPROC(general_protection)
15570
15571 /*
15572 * End of kprobes section
15573diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15574index 34a56a9..a4abbbe 100644
15575--- a/arch/x86/kernel/entry_64.S
15576+++ b/arch/x86/kernel/entry_64.S
15577@@ -53,6 +53,8 @@
15578 #include <asm/paravirt.h>
15579 #include <asm/ftrace.h>
15580 #include <asm/percpu.h>
15581+#include <asm/pgtable.h>
15582+#include <asm/alternative-asm.h>
15583
15584 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15585 #include <linux/elf-em.h>
15586@@ -64,8 +66,9 @@
15587 #ifdef CONFIG_FUNCTION_TRACER
15588 #ifdef CONFIG_DYNAMIC_FTRACE
15589 ENTRY(mcount)
15590+ pax_force_retaddr
15591 retq
15592-END(mcount)
15593+ENDPROC(mcount)
15594
15595 ENTRY(ftrace_caller)
15596 cmpl $0, function_trace_stop
15597@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15598 #endif
15599
15600 GLOBAL(ftrace_stub)
15601+ pax_force_retaddr
15602 retq
15603-END(ftrace_caller)
15604+ENDPROC(ftrace_caller)
15605
15606 #else /* ! CONFIG_DYNAMIC_FTRACE */
15607 ENTRY(mcount)
15608@@ -108,6 +112,7 @@ ENTRY(mcount)
15609 #endif
15610
15611 GLOBAL(ftrace_stub)
15612+ pax_force_retaddr
15613 retq
15614
15615 trace:
15616@@ -117,12 +122,13 @@ trace:
15617 movq 8(%rbp), %rsi
15618 subq $MCOUNT_INSN_SIZE, %rdi
15619
15620+ pax_force_fptr ftrace_trace_function
15621 call *ftrace_trace_function
15622
15623 MCOUNT_RESTORE_FRAME
15624
15625 jmp ftrace_stub
15626-END(mcount)
15627+ENDPROC(mcount)
15628 #endif /* CONFIG_DYNAMIC_FTRACE */
15629 #endif /* CONFIG_FUNCTION_TRACER */
15630
15631@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15632
15633 MCOUNT_RESTORE_FRAME
15634
15635+ pax_force_retaddr
15636 retq
15637-END(ftrace_graph_caller)
15638+ENDPROC(ftrace_graph_caller)
15639
15640 GLOBAL(return_to_handler)
15641 subq $24, %rsp
15642@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15643 movq 8(%rsp), %rdx
15644 movq (%rsp), %rax
15645 addq $16, %rsp
15646+ pax_force_retaddr
15647 retq
15648 #endif
15649
15650@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15651 ENDPROC(native_usergs_sysret64)
15652 #endif /* CONFIG_PARAVIRT */
15653
15654+ .macro ljmpq sel, off
15655+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15656+ .byte 0x48; ljmp *1234f(%rip)
15657+ .pushsection .rodata
15658+ .align 16
15659+ 1234: .quad \off; .word \sel
15660+ .popsection
15661+#else
15662+ pushq $\sel
15663+ pushq $\off
15664+ lretq
15665+#endif
15666+ .endm
15667+
15668+ .macro pax_enter_kernel
15669+ pax_set_fptr_mask
15670+#ifdef CONFIG_PAX_KERNEXEC
15671+ call pax_enter_kernel
15672+#endif
15673+ .endm
15674+
15675+ .macro pax_exit_kernel
15676+#ifdef CONFIG_PAX_KERNEXEC
15677+ call pax_exit_kernel
15678+#endif
15679+ .endm
15680+
15681+#ifdef CONFIG_PAX_KERNEXEC
15682+ENTRY(pax_enter_kernel)
15683+ pushq %rdi
15684+
15685+#ifdef CONFIG_PARAVIRT
15686+ PV_SAVE_REGS(CLBR_RDI)
15687+#endif
15688+
15689+ GET_CR0_INTO_RDI
15690+ bts $16,%rdi
15691+ jnc 3f
15692+ mov %cs,%edi
15693+ cmp $__KERNEL_CS,%edi
15694+ jnz 2f
15695+1:
15696+
15697+#ifdef CONFIG_PARAVIRT
15698+ PV_RESTORE_REGS(CLBR_RDI)
15699+#endif
15700+
15701+ popq %rdi
15702+ pax_force_retaddr
15703+ retq
15704+
15705+2: ljmpq __KERNEL_CS,1f
15706+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15707+4: SET_RDI_INTO_CR0
15708+ jmp 1b
15709+ENDPROC(pax_enter_kernel)
15710+
15711+ENTRY(pax_exit_kernel)
15712+ pushq %rdi
15713+
15714+#ifdef CONFIG_PARAVIRT
15715+ PV_SAVE_REGS(CLBR_RDI)
15716+#endif
15717+
15718+ mov %cs,%rdi
15719+ cmp $__KERNEXEC_KERNEL_CS,%edi
15720+ jz 2f
15721+1:
15722+
15723+#ifdef CONFIG_PARAVIRT
15724+ PV_RESTORE_REGS(CLBR_RDI);
15725+#endif
15726+
15727+ popq %rdi
15728+ pax_force_retaddr
15729+ retq
15730+
15731+2: GET_CR0_INTO_RDI
15732+ btr $16,%rdi
15733+ ljmpq __KERNEL_CS,3f
15734+3: SET_RDI_INTO_CR0
15735+ jmp 1b
15736+#ifdef CONFIG_PARAVIRT
15737+ PV_RESTORE_REGS(CLBR_RDI);
15738+#endif
15739+
15740+ popq %rdi
15741+ pax_force_retaddr
15742+ retq
15743+ENDPROC(pax_exit_kernel)
15744+#endif
15745+
15746+ .macro pax_enter_kernel_user
15747+ pax_set_fptr_mask
15748+#ifdef CONFIG_PAX_MEMORY_UDEREF
15749+ call pax_enter_kernel_user
15750+#endif
15751+ .endm
15752+
15753+ .macro pax_exit_kernel_user
15754+#ifdef CONFIG_PAX_MEMORY_UDEREF
15755+ call pax_exit_kernel_user
15756+#endif
15757+#ifdef CONFIG_PAX_RANDKSTACK
15758+ push %rax
15759+ call pax_randomize_kstack
15760+ pop %rax
15761+#endif
15762+ .endm
15763+
15764+#ifdef CONFIG_PAX_MEMORY_UDEREF
15765+ENTRY(pax_enter_kernel_user)
15766+ pushq %rdi
15767+ pushq %rbx
15768+
15769+#ifdef CONFIG_PARAVIRT
15770+ PV_SAVE_REGS(CLBR_RDI)
15771+#endif
15772+
15773+ GET_CR3_INTO_RDI
15774+ mov %rdi,%rbx
15775+ add $__START_KERNEL_map,%rbx
15776+ sub phys_base(%rip),%rbx
15777+
15778+#ifdef CONFIG_PARAVIRT
15779+ pushq %rdi
15780+ cmpl $0, pv_info+PARAVIRT_enabled
15781+ jz 1f
15782+ i = 0
15783+ .rept USER_PGD_PTRS
15784+ mov i*8(%rbx),%rsi
15785+ mov $0,%sil
15786+ lea i*8(%rbx),%rdi
15787+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15788+ i = i + 1
15789+ .endr
15790+ jmp 2f
15791+1:
15792+#endif
15793+
15794+ i = 0
15795+ .rept USER_PGD_PTRS
15796+ movb $0,i*8(%rbx)
15797+ i = i + 1
15798+ .endr
15799+
15800+#ifdef CONFIG_PARAVIRT
15801+2: popq %rdi
15802+#endif
15803+ SET_RDI_INTO_CR3
15804+
15805+#ifdef CONFIG_PAX_KERNEXEC
15806+ GET_CR0_INTO_RDI
15807+ bts $16,%rdi
15808+ SET_RDI_INTO_CR0
15809+#endif
15810+
15811+#ifdef CONFIG_PARAVIRT
15812+ PV_RESTORE_REGS(CLBR_RDI)
15813+#endif
15814+
15815+ popq %rbx
15816+ popq %rdi
15817+ pax_force_retaddr
15818+ retq
15819+ENDPROC(pax_enter_kernel_user)
15820+
15821+ENTRY(pax_exit_kernel_user)
15822+ push %rdi
15823+
15824+#ifdef CONFIG_PARAVIRT
15825+ pushq %rbx
15826+ PV_SAVE_REGS(CLBR_RDI)
15827+#endif
15828+
15829+#ifdef CONFIG_PAX_KERNEXEC
15830+ GET_CR0_INTO_RDI
15831+ btr $16,%rdi
15832+ SET_RDI_INTO_CR0
15833+#endif
15834+
15835+ GET_CR3_INTO_RDI
15836+ add $__START_KERNEL_map,%rdi
15837+ sub phys_base(%rip),%rdi
15838+
15839+#ifdef CONFIG_PARAVIRT
15840+ cmpl $0, pv_info+PARAVIRT_enabled
15841+ jz 1f
15842+ mov %rdi,%rbx
15843+ i = 0
15844+ .rept USER_PGD_PTRS
15845+ mov i*8(%rbx),%rsi
15846+ mov $0x67,%sil
15847+ lea i*8(%rbx),%rdi
15848+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15849+ i = i + 1
15850+ .endr
15851+ jmp 2f
15852+1:
15853+#endif
15854+
15855+ i = 0
15856+ .rept USER_PGD_PTRS
15857+ movb $0x67,i*8(%rdi)
15858+ i = i + 1
15859+ .endr
15860+
15861+#ifdef CONFIG_PARAVIRT
15862+2: PV_RESTORE_REGS(CLBR_RDI)
15863+ popq %rbx
15864+#endif
15865+
15866+ popq %rdi
15867+ pax_force_retaddr
15868+ retq
15869+ENDPROC(pax_exit_kernel_user)
15870+#endif
15871+
15872+.macro pax_erase_kstack
15873+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15874+ call pax_erase_kstack
15875+#endif
15876+.endm
15877+
15878+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15879+/*
15880+ * r11: thread_info
15881+ * rcx, rdx: can be clobbered
15882+ */
15883+ENTRY(pax_erase_kstack)
15884+ pushq %rdi
15885+ pushq %rax
15886+ pushq %r11
15887+
15888+ GET_THREAD_INFO(%r11)
15889+ mov TI_lowest_stack(%r11), %rdi
15890+ mov $-0xBEEF, %rax
15891+ std
15892+
15893+1: mov %edi, %ecx
15894+ and $THREAD_SIZE_asm - 1, %ecx
15895+ shr $3, %ecx
15896+ repne scasq
15897+ jecxz 2f
15898+
15899+ cmp $2*8, %ecx
15900+ jc 2f
15901+
15902+ mov $2*8, %ecx
15903+ repe scasq
15904+ jecxz 2f
15905+ jne 1b
15906+
15907+2: cld
15908+ mov %esp, %ecx
15909+ sub %edi, %ecx
15910+
15911+ cmp $THREAD_SIZE_asm, %rcx
15912+ jb 3f
15913+ ud2
15914+3:
15915+
15916+ shr $3, %ecx
15917+ rep stosq
15918+
15919+ mov TI_task_thread_sp0(%r11), %rdi
15920+ sub $256, %rdi
15921+ mov %rdi, TI_lowest_stack(%r11)
15922+
15923+ popq %r11
15924+ popq %rax
15925+ popq %rdi
15926+ pax_force_retaddr
15927+ ret
15928+ENDPROC(pax_erase_kstack)
15929+#endif
15930
15931 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15932 #ifdef CONFIG_TRACE_IRQFLAGS
15933@@ -317,7 +601,7 @@ ENTRY(save_args)
15934 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
15935 movq_cfi rbp, 8 /* push %rbp */
15936 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
15937- testl $3, CS(%rdi)
15938+ testb $3, CS(%rdi)
15939 je 1f
15940 SWAPGS
15941 /*
15942@@ -337,9 +621,10 @@ ENTRY(save_args)
15943 * We entered an interrupt context - irqs are off:
15944 */
15945 2: TRACE_IRQS_OFF
15946+ pax_force_retaddr
15947 ret
15948 CFI_ENDPROC
15949-END(save_args)
15950+ENDPROC(save_args)
15951
15952 ENTRY(save_rest)
15953 PARTIAL_FRAME 1 REST_SKIP+8
15954@@ -352,9 +637,10 @@ ENTRY(save_rest)
15955 movq_cfi r15, R15+16
15956 movq %r11, 8(%rsp) /* return address */
15957 FIXUP_TOP_OF_STACK %r11, 16
15958+ pax_force_retaddr
15959 ret
15960 CFI_ENDPROC
15961-END(save_rest)
15962+ENDPROC(save_rest)
15963
15964 /* save complete stack frame */
15965 .pushsection .kprobes.text, "ax"
15966@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
15967 js 1f /* negative -> in kernel */
15968 SWAPGS
15969 xorl %ebx,%ebx
15970-1: ret
15971+1: pax_force_retaddr_bts
15972+ ret
15973 CFI_ENDPROC
15974-END(save_paranoid)
15975+ENDPROC(save_paranoid)
15976 .popsection
15977
15978 /*
15979@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
15980
15981 RESTORE_REST
15982
15983- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15984+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15985 je int_ret_from_sys_call
15986
15987 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15988@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
15989 jmp ret_from_sys_call # go to the SYSRET fastpath
15990
15991 CFI_ENDPROC
15992-END(ret_from_fork)
15993+ENDPROC(ret_from_fork)
15994
15995 /*
15996 * System call entry. Upto 6 arguments in registers are supported.
15997@@ -455,7 +742,7 @@ END(ret_from_fork)
15998 ENTRY(system_call)
15999 CFI_STARTPROC simple
16000 CFI_SIGNAL_FRAME
16001- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16002+ CFI_DEF_CFA rsp,0
16003 CFI_REGISTER rip,rcx
16004 /*CFI_REGISTER rflags,r11*/
16005 SWAPGS_UNSAFE_STACK
16006@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16007
16008 movq %rsp,PER_CPU_VAR(old_rsp)
16009 movq PER_CPU_VAR(kernel_stack),%rsp
16010+ SAVE_ARGS 8*6,1
16011+ pax_enter_kernel_user
16012 /*
16013 * No need to follow this irqs off/on section - it's straight
16014 * and short:
16015 */
16016 ENABLE_INTERRUPTS(CLBR_NONE)
16017- SAVE_ARGS 8,1
16018 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16019 movq %rcx,RIP-ARGOFFSET(%rsp)
16020 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16021@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16022 system_call_fastpath:
16023 cmpq $__NR_syscall_max,%rax
16024 ja badsys
16025- movq %r10,%rcx
16026+ movq R10-ARGOFFSET(%rsp),%rcx
16027 call *sys_call_table(,%rax,8) # XXX: rip relative
16028 movq %rax,RAX-ARGOFFSET(%rsp)
16029 /*
16030@@ -502,6 +790,8 @@ sysret_check:
16031 andl %edi,%edx
16032 jnz sysret_careful
16033 CFI_REMEMBER_STATE
16034+ pax_exit_kernel_user
16035+ pax_erase_kstack
16036 /*
16037 * sysretq will re-enable interrupts:
16038 */
16039@@ -555,14 +845,18 @@ badsys:
16040 * jump back to the normal fast path.
16041 */
16042 auditsys:
16043- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16044+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16045 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16046 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16047 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16048 movq %rax,%rsi /* 2nd arg: syscall number */
16049 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16050 call audit_syscall_entry
16051+
16052+ pax_erase_kstack
16053+
16054 LOAD_ARGS 0 /* reload call-clobbered registers */
16055+ pax_set_fptr_mask
16056 jmp system_call_fastpath
16057
16058 /*
16059@@ -592,16 +886,20 @@ tracesys:
16060 FIXUP_TOP_OF_STACK %rdi
16061 movq %rsp,%rdi
16062 call syscall_trace_enter
16063+
16064+ pax_erase_kstack
16065+
16066 /*
16067 * Reload arg registers from stack in case ptrace changed them.
16068 * We don't reload %rax because syscall_trace_enter() returned
16069 * the value it wants us to use in the table lookup.
16070 */
16071 LOAD_ARGS ARGOFFSET, 1
16072+ pax_set_fptr_mask
16073 RESTORE_REST
16074 cmpq $__NR_syscall_max,%rax
16075 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16076- movq %r10,%rcx /* fixup for C */
16077+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16078 call *sys_call_table(,%rax,8)
16079 movq %rax,RAX-ARGOFFSET(%rsp)
16080 /* Use IRET because user could have changed frame */
16081@@ -613,7 +911,7 @@ tracesys:
16082 GLOBAL(int_ret_from_sys_call)
16083 DISABLE_INTERRUPTS(CLBR_NONE)
16084 TRACE_IRQS_OFF
16085- testl $3,CS-ARGOFFSET(%rsp)
16086+ testb $3,CS-ARGOFFSET(%rsp)
16087 je retint_restore_args
16088 movl $_TIF_ALLWORK_MASK,%edi
16089 /* edi: mask to check */
16090@@ -674,7 +972,7 @@ int_restore_rest:
16091 TRACE_IRQS_OFF
16092 jmp int_with_check
16093 CFI_ENDPROC
16094-END(system_call)
16095+ENDPROC(system_call)
16096
16097 /*
16098 * Certain special system calls that need to save a complete full stack frame.
16099@@ -690,7 +988,7 @@ ENTRY(\label)
16100 call \func
16101 jmp ptregscall_common
16102 CFI_ENDPROC
16103-END(\label)
16104+ENDPROC(\label)
16105 .endm
16106
16107 PTREGSCALL stub_clone, sys_clone, %r8
16108@@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16109 movq_cfi_restore R12+8, r12
16110 movq_cfi_restore RBP+8, rbp
16111 movq_cfi_restore RBX+8, rbx
16112+ pax_force_retaddr
16113 ret $REST_SKIP /* pop extended registers */
16114 CFI_ENDPROC
16115-END(ptregscall_common)
16116+ENDPROC(ptregscall_common)
16117
16118 ENTRY(stub_execve)
16119 CFI_STARTPROC
16120@@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16121 RESTORE_REST
16122 jmp int_ret_from_sys_call
16123 CFI_ENDPROC
16124-END(stub_execve)
16125+ENDPROC(stub_execve)
16126
16127 /*
16128 * sigreturn is special because it needs to restore all registers on return.
16129@@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16130 RESTORE_REST
16131 jmp int_ret_from_sys_call
16132 CFI_ENDPROC
16133-END(stub_rt_sigreturn)
16134+ENDPROC(stub_rt_sigreturn)
16135
16136 /*
16137 * Build the entry stubs and pointer table with some assembler magic.
16138@@ -780,7 +1079,7 @@ vector=vector+1
16139 2: jmp common_interrupt
16140 .endr
16141 CFI_ENDPROC
16142-END(irq_entries_start)
16143+ENDPROC(irq_entries_start)
16144
16145 .previous
16146 END(interrupt)
16147@@ -800,6 +1099,16 @@ END(interrupt)
16148 CFI_ADJUST_CFA_OFFSET 10*8
16149 call save_args
16150 PARTIAL_FRAME 0
16151+#ifdef CONFIG_PAX_MEMORY_UDEREF
16152+ testb $3, CS(%rdi)
16153+ jnz 1f
16154+ pax_enter_kernel
16155+ jmp 2f
16156+1: pax_enter_kernel_user
16157+2:
16158+#else
16159+ pax_enter_kernel
16160+#endif
16161 call \func
16162 .endm
16163
16164@@ -822,7 +1131,7 @@ ret_from_intr:
16165 CFI_ADJUST_CFA_OFFSET -8
16166 exit_intr:
16167 GET_THREAD_INFO(%rcx)
16168- testl $3,CS-ARGOFFSET(%rsp)
16169+ testb $3,CS-ARGOFFSET(%rsp)
16170 je retint_kernel
16171
16172 /* Interrupt came from user space */
16173@@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16174 * The iretq could re-enable interrupts:
16175 */
16176 DISABLE_INTERRUPTS(CLBR_ANY)
16177+ pax_exit_kernel_user
16178+ pax_erase_kstack
16179 TRACE_IRQS_IRETQ
16180 SWAPGS
16181 jmp restore_args
16182
16183 retint_restore_args: /* return to kernel space */
16184 DISABLE_INTERRUPTS(CLBR_ANY)
16185+ pax_exit_kernel
16186+ pax_force_retaddr RIP-ARGOFFSET
16187 /*
16188 * The iretq could re-enable interrupts:
16189 */
16190@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16191 #endif
16192
16193 CFI_ENDPROC
16194-END(common_interrupt)
16195+ENDPROC(common_interrupt)
16196
16197 /*
16198 * APIC interrupts.
16199@@ -953,7 +1266,7 @@ ENTRY(\sym)
16200 interrupt \do_sym
16201 jmp ret_from_intr
16202 CFI_ENDPROC
16203-END(\sym)
16204+ENDPROC(\sym)
16205 .endm
16206
16207 #ifdef CONFIG_SMP
16208@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16209 CFI_ADJUST_CFA_OFFSET 15*8
16210 call error_entry
16211 DEFAULT_FRAME 0
16212+#ifdef CONFIG_PAX_MEMORY_UDEREF
16213+ testb $3, CS(%rsp)
16214+ jnz 1f
16215+ pax_enter_kernel
16216+ jmp 2f
16217+1: pax_enter_kernel_user
16218+2:
16219+#else
16220+ pax_enter_kernel
16221+#endif
16222 movq %rsp,%rdi /* pt_regs pointer */
16223 xorl %esi,%esi /* no error code */
16224 call \do_sym
16225 jmp error_exit /* %ebx: no swapgs flag */
16226 CFI_ENDPROC
16227-END(\sym)
16228+ENDPROC(\sym)
16229 .endm
16230
16231 .macro paranoidzeroentry sym do_sym
16232@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16233 subq $15*8, %rsp
16234 call save_paranoid
16235 TRACE_IRQS_OFF
16236+#ifdef CONFIG_PAX_MEMORY_UDEREF
16237+ testb $3, CS(%rsp)
16238+ jnz 1f
16239+ pax_enter_kernel
16240+ jmp 2f
16241+1: pax_enter_kernel_user
16242+2:
16243+#else
16244+ pax_enter_kernel
16245+#endif
16246 movq %rsp,%rdi /* pt_regs pointer */
16247 xorl %esi,%esi /* no error code */
16248 call \do_sym
16249 jmp paranoid_exit /* %ebx: no swapgs flag */
16250 CFI_ENDPROC
16251-END(\sym)
16252+ENDPROC(\sym)
16253 .endm
16254
16255 .macro paranoidzeroentry_ist sym do_sym ist
16256@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16257 subq $15*8, %rsp
16258 call save_paranoid
16259 TRACE_IRQS_OFF
16260+#ifdef CONFIG_PAX_MEMORY_UDEREF
16261+ testb $3, CS(%rsp)
16262+ jnz 1f
16263+ pax_enter_kernel
16264+ jmp 2f
16265+1: pax_enter_kernel_user
16266+2:
16267+#else
16268+ pax_enter_kernel
16269+#endif
16270 movq %rsp,%rdi /* pt_regs pointer */
16271 xorl %esi,%esi /* no error code */
16272- PER_CPU(init_tss, %rbp)
16273+#ifdef CONFIG_SMP
16274+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16275+ lea init_tss(%rbp), %rbp
16276+#else
16277+ lea init_tss(%rip), %rbp
16278+#endif
16279 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16280 call \do_sym
16281 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16282 jmp paranoid_exit /* %ebx: no swapgs flag */
16283 CFI_ENDPROC
16284-END(\sym)
16285+ENDPROC(\sym)
16286 .endm
16287
16288 .macro errorentry sym do_sym
16289@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16290 CFI_ADJUST_CFA_OFFSET 15*8
16291 call error_entry
16292 DEFAULT_FRAME 0
16293+#ifdef CONFIG_PAX_MEMORY_UDEREF
16294+ testb $3, CS(%rsp)
16295+ jnz 1f
16296+ pax_enter_kernel
16297+ jmp 2f
16298+1: pax_enter_kernel_user
16299+2:
16300+#else
16301+ pax_enter_kernel
16302+#endif
16303 movq %rsp,%rdi /* pt_regs pointer */
16304 movq ORIG_RAX(%rsp),%rsi /* get error code */
16305 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16306 call \do_sym
16307 jmp error_exit /* %ebx: no swapgs flag */
16308 CFI_ENDPROC
16309-END(\sym)
16310+ENDPROC(\sym)
16311 .endm
16312
16313 /* error code is on the stack already */
16314@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16315 call save_paranoid
16316 DEFAULT_FRAME 0
16317 TRACE_IRQS_OFF
16318+#ifdef CONFIG_PAX_MEMORY_UDEREF
16319+ testb $3, CS(%rsp)
16320+ jnz 1f
16321+ pax_enter_kernel
16322+ jmp 2f
16323+1: pax_enter_kernel_user
16324+2:
16325+#else
16326+ pax_enter_kernel
16327+#endif
16328 movq %rsp,%rdi /* pt_regs pointer */
16329 movq ORIG_RAX(%rsp),%rsi /* get error code */
16330 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16331 call \do_sym
16332 jmp paranoid_exit /* %ebx: no swapgs flag */
16333 CFI_ENDPROC
16334-END(\sym)
16335+ENDPROC(\sym)
16336 .endm
16337
16338 zeroentry divide_error do_divide_error
16339@@ -1141,9 +1509,10 @@ gs_change:
16340 SWAPGS
16341 popf
16342 CFI_ADJUST_CFA_OFFSET -8
16343+ pax_force_retaddr
16344 ret
16345 CFI_ENDPROC
16346-END(native_load_gs_index)
16347+ENDPROC(native_load_gs_index)
16348
16349 .section __ex_table,"a"
16350 .align 8
16351@@ -1195,9 +1564,10 @@ ENTRY(kernel_thread)
16352 */
16353 RESTORE_ALL
16354 UNFAKE_STACK_FRAME
16355+ pax_force_retaddr
16356 ret
16357 CFI_ENDPROC
16358-END(kernel_thread)
16359+ENDPROC(kernel_thread)
16360
16361 ENTRY(child_rip)
16362 pushq $0 # fake return address
16363@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16364 */
16365 movq %rdi, %rax
16366 movq %rsi, %rdi
16367+ pax_force_fptr %rax
16368 call *%rax
16369 # exit
16370 mov %eax, %edi
16371 call do_exit
16372 ud2 # padding for call trace
16373 CFI_ENDPROC
16374-END(child_rip)
16375+ENDPROC(child_rip)
16376
16377 /*
16378 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16379@@ -1243,9 +1614,10 @@ ENTRY(kernel_execve)
16380 je int_ret_from_sys_call
16381 RESTORE_ARGS
16382 UNFAKE_STACK_FRAME
16383+ pax_force_retaddr
16384 ret
16385 CFI_ENDPROC
16386-END(kernel_execve)
16387+ENDPROC(kernel_execve)
16388
16389 /* Call softirq on interrupt stack. Interrupts are off. */
16390 ENTRY(call_softirq)
16391@@ -1263,9 +1635,10 @@ ENTRY(call_softirq)
16392 CFI_DEF_CFA_REGISTER rsp
16393 CFI_ADJUST_CFA_OFFSET -8
16394 decl PER_CPU_VAR(irq_count)
16395+ pax_force_retaddr
16396 ret
16397 CFI_ENDPROC
16398-END(call_softirq)
16399+ENDPROC(call_softirq)
16400
16401 #ifdef CONFIG_XEN
16402 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16403@@ -1303,7 +1676,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16404 decl PER_CPU_VAR(irq_count)
16405 jmp error_exit
16406 CFI_ENDPROC
16407-END(xen_do_hypervisor_callback)
16408+ENDPROC(xen_do_hypervisor_callback)
16409
16410 /*
16411 * Hypervisor uses this for application faults while it executes.
16412@@ -1362,7 +1735,7 @@ ENTRY(xen_failsafe_callback)
16413 SAVE_ALL
16414 jmp error_exit
16415 CFI_ENDPROC
16416-END(xen_failsafe_callback)
16417+ENDPROC(xen_failsafe_callback)
16418
16419 #endif /* CONFIG_XEN */
16420
16421@@ -1405,16 +1778,31 @@ ENTRY(paranoid_exit)
16422 TRACE_IRQS_OFF
16423 testl %ebx,%ebx /* swapgs needed? */
16424 jnz paranoid_restore
16425- testl $3,CS(%rsp)
16426+ testb $3,CS(%rsp)
16427 jnz paranoid_userspace
16428+#ifdef CONFIG_PAX_MEMORY_UDEREF
16429+ pax_exit_kernel
16430+ TRACE_IRQS_IRETQ 0
16431+ SWAPGS_UNSAFE_STACK
16432+ RESTORE_ALL 8
16433+ pax_force_retaddr_bts
16434+ jmp irq_return
16435+#endif
16436 paranoid_swapgs:
16437+#ifdef CONFIG_PAX_MEMORY_UDEREF
16438+ pax_exit_kernel_user
16439+#else
16440+ pax_exit_kernel
16441+#endif
16442 TRACE_IRQS_IRETQ 0
16443 SWAPGS_UNSAFE_STACK
16444 RESTORE_ALL 8
16445 jmp irq_return
16446 paranoid_restore:
16447+ pax_exit_kernel
16448 TRACE_IRQS_IRETQ 0
16449 RESTORE_ALL 8
16450+ pax_force_retaddr_bts
16451 jmp irq_return
16452 paranoid_userspace:
16453 GET_THREAD_INFO(%rcx)
16454@@ -1443,7 +1831,7 @@ paranoid_schedule:
16455 TRACE_IRQS_OFF
16456 jmp paranoid_userspace
16457 CFI_ENDPROC
16458-END(paranoid_exit)
16459+ENDPROC(paranoid_exit)
16460
16461 /*
16462 * Exception entry point. This expects an error code/orig_rax on the stack.
16463@@ -1470,12 +1858,13 @@ ENTRY(error_entry)
16464 movq_cfi r14, R14+8
16465 movq_cfi r15, R15+8
16466 xorl %ebx,%ebx
16467- testl $3,CS+8(%rsp)
16468+ testb $3,CS+8(%rsp)
16469 je error_kernelspace
16470 error_swapgs:
16471 SWAPGS
16472 error_sti:
16473 TRACE_IRQS_OFF
16474+ pax_force_retaddr_bts
16475 ret
16476 CFI_ENDPROC
16477
16478@@ -1497,7 +1886,7 @@ error_kernelspace:
16479 cmpq $gs_change,RIP+8(%rsp)
16480 je error_swapgs
16481 jmp error_sti
16482-END(error_entry)
16483+ENDPROC(error_entry)
16484
16485
16486 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16487@@ -1517,7 +1906,7 @@ ENTRY(error_exit)
16488 jnz retint_careful
16489 jmp retint_swapgs
16490 CFI_ENDPROC
16491-END(error_exit)
16492+ENDPROC(error_exit)
16493
16494
16495 /* runs on exception stack */
16496@@ -1529,6 +1918,16 @@ ENTRY(nmi)
16497 CFI_ADJUST_CFA_OFFSET 15*8
16498 call save_paranoid
16499 DEFAULT_FRAME 0
16500+#ifdef CONFIG_PAX_MEMORY_UDEREF
16501+ testb $3, CS(%rsp)
16502+ jnz 1f
16503+ pax_enter_kernel
16504+ jmp 2f
16505+1: pax_enter_kernel_user
16506+2:
16507+#else
16508+ pax_enter_kernel
16509+#endif
16510 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16511 movq %rsp,%rdi
16512 movq $-1,%rsi
16513@@ -1539,12 +1938,28 @@ ENTRY(nmi)
16514 DISABLE_INTERRUPTS(CLBR_NONE)
16515 testl %ebx,%ebx /* swapgs needed? */
16516 jnz nmi_restore
16517- testl $3,CS(%rsp)
16518+ testb $3,CS(%rsp)
16519 jnz nmi_userspace
16520+#ifdef CONFIG_PAX_MEMORY_UDEREF
16521+ pax_exit_kernel
16522+ SWAPGS_UNSAFE_STACK
16523+ RESTORE_ALL 8
16524+ pax_force_retaddr_bts
16525+ jmp irq_return
16526+#endif
16527 nmi_swapgs:
16528+#ifdef CONFIG_PAX_MEMORY_UDEREF
16529+ pax_exit_kernel_user
16530+#else
16531+ pax_exit_kernel
16532+#endif
16533 SWAPGS_UNSAFE_STACK
16534+ RESTORE_ALL 8
16535+ jmp irq_return
16536 nmi_restore:
16537+ pax_exit_kernel
16538 RESTORE_ALL 8
16539+ pax_force_retaddr_bts
16540 jmp irq_return
16541 nmi_userspace:
16542 GET_THREAD_INFO(%rcx)
16543@@ -1573,14 +1988,14 @@ nmi_schedule:
16544 jmp paranoid_exit
16545 CFI_ENDPROC
16546 #endif
16547-END(nmi)
16548+ENDPROC(nmi)
16549
16550 ENTRY(ignore_sysret)
16551 CFI_STARTPROC
16552 mov $-ENOSYS,%eax
16553 sysret
16554 CFI_ENDPROC
16555-END(ignore_sysret)
16556+ENDPROC(ignore_sysret)
16557
16558 /*
16559 * End of kprobes section
16560diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16561index 9dbb527..7b3615a 100644
16562--- a/arch/x86/kernel/ftrace.c
16563+++ b/arch/x86/kernel/ftrace.c
16564@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16565 static void *mod_code_newcode; /* holds the text to write to the IP */
16566
16567 static unsigned nmi_wait_count;
16568-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16569+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16570
16571 int ftrace_arch_read_dyn_info(char *buf, int size)
16572 {
16573@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16574
16575 r = snprintf(buf, size, "%u %u",
16576 nmi_wait_count,
16577- atomic_read(&nmi_update_count));
16578+ atomic_read_unchecked(&nmi_update_count));
16579 return r;
16580 }
16581
16582@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16583 {
16584 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16585 smp_rmb();
16586+ pax_open_kernel();
16587 ftrace_mod_code();
16588- atomic_inc(&nmi_update_count);
16589+ pax_close_kernel();
16590+ atomic_inc_unchecked(&nmi_update_count);
16591 }
16592 /* Must have previous changes seen before executions */
16593 smp_mb();
16594@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16595
16596
16597
16598-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16599+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16600
16601 static unsigned char *ftrace_nop_replace(void)
16602 {
16603@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16604 {
16605 unsigned char replaced[MCOUNT_INSN_SIZE];
16606
16607+ ip = ktla_ktva(ip);
16608+
16609 /*
16610 * Note: Due to modules and __init, code can
16611 * disappear and change, we need to protect against faulting
16612@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16613 unsigned char old[MCOUNT_INSN_SIZE], *new;
16614 int ret;
16615
16616- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16617+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16618 new = ftrace_call_replace(ip, (unsigned long)func);
16619 ret = ftrace_modify_code(ip, old, new);
16620
16621@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16622 switch (faulted) {
16623 case 0:
16624 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16625- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16626+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16627 break;
16628 case 1:
16629 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16630- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16631+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16632 break;
16633 case 2:
16634 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16635- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16636+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16637 break;
16638 }
16639
16640@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16641 {
16642 unsigned char code[MCOUNT_INSN_SIZE];
16643
16644+ ip = ktla_ktva(ip);
16645+
16646 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16647 return -EFAULT;
16648
16649diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16650index 4f8e250..df24706 100644
16651--- a/arch/x86/kernel/head32.c
16652+++ b/arch/x86/kernel/head32.c
16653@@ -16,6 +16,7 @@
16654 #include <asm/apic.h>
16655 #include <asm/io_apic.h>
16656 #include <asm/bios_ebda.h>
16657+#include <asm/boot.h>
16658
16659 static void __init i386_default_early_setup(void)
16660 {
16661@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16662 {
16663 reserve_trampoline_memory();
16664
16665- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16666+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16667
16668 #ifdef CONFIG_BLK_DEV_INITRD
16669 /* Reserve INITRD */
16670diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16671index 34c3308..6fc4e76 100644
16672--- a/arch/x86/kernel/head_32.S
16673+++ b/arch/x86/kernel/head_32.S
16674@@ -19,10 +19,17 @@
16675 #include <asm/setup.h>
16676 #include <asm/processor-flags.h>
16677 #include <asm/percpu.h>
16678+#include <asm/msr-index.h>
16679
16680 /* Physical address */
16681 #define pa(X) ((X) - __PAGE_OFFSET)
16682
16683+#ifdef CONFIG_PAX_KERNEXEC
16684+#define ta(X) (X)
16685+#else
16686+#define ta(X) ((X) - __PAGE_OFFSET)
16687+#endif
16688+
16689 /*
16690 * References to members of the new_cpu_data structure.
16691 */
16692@@ -52,11 +59,7 @@
16693 * and small than max_low_pfn, otherwise will waste some page table entries
16694 */
16695
16696-#if PTRS_PER_PMD > 1
16697-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16698-#else
16699-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16700-#endif
16701+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16702
16703 /* Enough space to fit pagetables for the low memory linear map */
16704 MAPPING_BEYOND_END = \
16705@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16706 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16707
16708 /*
16709+ * Real beginning of normal "text" segment
16710+ */
16711+ENTRY(stext)
16712+ENTRY(_stext)
16713+
16714+/*
16715 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16716 * %esi points to the real-mode code as a 32-bit pointer.
16717 * CS and DS must be 4 GB flat segments, but we don't depend on
16718@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16719 * can.
16720 */
16721 __HEAD
16722+
16723+#ifdef CONFIG_PAX_KERNEXEC
16724+ jmp startup_32
16725+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16726+.fill PAGE_SIZE-5,1,0xcc
16727+#endif
16728+
16729 ENTRY(startup_32)
16730+ movl pa(stack_start),%ecx
16731+
16732 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16733 us to not reload segments */
16734 testb $(1<<6), BP_loadflags(%esi)
16735@@ -95,7 +113,60 @@ ENTRY(startup_32)
16736 movl %eax,%es
16737 movl %eax,%fs
16738 movl %eax,%gs
16739+ movl %eax,%ss
16740 2:
16741+ leal -__PAGE_OFFSET(%ecx),%esp
16742+
16743+#ifdef CONFIG_SMP
16744+ movl $pa(cpu_gdt_table),%edi
16745+ movl $__per_cpu_load,%eax
16746+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16747+ rorl $16,%eax
16748+ movb %al,__KERNEL_PERCPU + 4(%edi)
16749+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16750+ movl $__per_cpu_end - 1,%eax
16751+ subl $__per_cpu_start,%eax
16752+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16753+#endif
16754+
16755+#ifdef CONFIG_PAX_MEMORY_UDEREF
16756+ movl $NR_CPUS,%ecx
16757+ movl $pa(cpu_gdt_table),%edi
16758+1:
16759+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16760+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16761+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16762+ addl $PAGE_SIZE_asm,%edi
16763+ loop 1b
16764+#endif
16765+
16766+#ifdef CONFIG_PAX_KERNEXEC
16767+ movl $pa(boot_gdt),%edi
16768+ movl $__LOAD_PHYSICAL_ADDR,%eax
16769+ movw %ax,__BOOT_CS + 2(%edi)
16770+ rorl $16,%eax
16771+ movb %al,__BOOT_CS + 4(%edi)
16772+ movb %ah,__BOOT_CS + 7(%edi)
16773+ rorl $16,%eax
16774+
16775+ ljmp $(__BOOT_CS),$1f
16776+1:
16777+
16778+ movl $NR_CPUS,%ecx
16779+ movl $pa(cpu_gdt_table),%edi
16780+ addl $__PAGE_OFFSET,%eax
16781+1:
16782+ movw %ax,__KERNEL_CS + 2(%edi)
16783+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16784+ rorl $16,%eax
16785+ movb %al,__KERNEL_CS + 4(%edi)
16786+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16787+ movb %ah,__KERNEL_CS + 7(%edi)
16788+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16789+ rorl $16,%eax
16790+ addl $PAGE_SIZE_asm,%edi
16791+ loop 1b
16792+#endif
16793
16794 /*
16795 * Clear BSS first so that there are no surprises...
16796@@ -140,9 +211,7 @@ ENTRY(startup_32)
16797 cmpl $num_subarch_entries, %eax
16798 jae bad_subarch
16799
16800- movl pa(subarch_entries)(,%eax,4), %eax
16801- subl $__PAGE_OFFSET, %eax
16802- jmp *%eax
16803+ jmp *pa(subarch_entries)(,%eax,4)
16804
16805 bad_subarch:
16806 WEAK(lguest_entry)
16807@@ -154,10 +223,10 @@ WEAK(xen_entry)
16808 __INITDATA
16809
16810 subarch_entries:
16811- .long default_entry /* normal x86/PC */
16812- .long lguest_entry /* lguest hypervisor */
16813- .long xen_entry /* Xen hypervisor */
16814- .long default_entry /* Moorestown MID */
16815+ .long ta(default_entry) /* normal x86/PC */
16816+ .long ta(lguest_entry) /* lguest hypervisor */
16817+ .long ta(xen_entry) /* Xen hypervisor */
16818+ .long ta(default_entry) /* Moorestown MID */
16819 num_subarch_entries = (. - subarch_entries) / 4
16820 .previous
16821 #endif /* CONFIG_PARAVIRT */
16822@@ -218,8 +287,11 @@ default_entry:
16823 movl %eax, pa(max_pfn_mapped)
16824
16825 /* Do early initialization of the fixmap area */
16826- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16827- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16828+#ifdef CONFIG_COMPAT_VDSO
16829+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16830+#else
16831+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16832+#endif
16833 #else /* Not PAE */
16834
16835 page_pde_offset = (__PAGE_OFFSET >> 20);
16836@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16837 movl %eax, pa(max_pfn_mapped)
16838
16839 /* Do early initialization of the fixmap area */
16840- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16841- movl %eax,pa(swapper_pg_dir+0xffc)
16842+#ifdef CONFIG_COMPAT_VDSO
16843+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16844+#else
16845+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16846+#endif
16847 #endif
16848 jmp 3f
16849 /*
16850@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16851 movl %eax,%es
16852 movl %eax,%fs
16853 movl %eax,%gs
16854+ movl pa(stack_start),%ecx
16855+ movl %eax,%ss
16856+ leal -__PAGE_OFFSET(%ecx),%esp
16857 #endif /* CONFIG_SMP */
16858 3:
16859
16860@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16861 orl %edx,%eax
16862 movl %eax,%cr4
16863
16864+#ifdef CONFIG_X86_PAE
16865 btl $5, %eax # check if PAE is enabled
16866 jnc 6f
16867
16868@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16869 cpuid
16870 cmpl $0x80000000, %eax
16871 jbe 6f
16872+
16873+ /* Clear bogus XD_DISABLE bits */
16874+ call verify_cpu
16875+
16876 mov $0x80000001, %eax
16877 cpuid
16878 /* Execute Disable bit supported? */
16879@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16880 jnc 6f
16881
16882 /* Setup EFER (Extended Feature Enable Register) */
16883- movl $0xc0000080, %ecx
16884+ movl $MSR_EFER, %ecx
16885 rdmsr
16886
16887 btsl $11, %eax
16888 /* Make changes effective */
16889 wrmsr
16890
16891+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16892+ movl $1,pa(nx_enabled)
16893+#endif
16894+
16895 6:
16896
16897 /*
16898@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16899 movl %eax,%cr0 /* ..and set paging (PG) bit */
16900 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16901 1:
16902- /* Set up the stack pointer */
16903- lss stack_start,%esp
16904+ /* Shift the stack pointer to a virtual address */
16905+ addl $__PAGE_OFFSET, %esp
16906
16907 /*
16908 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16909@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16910
16911 #ifdef CONFIG_SMP
16912 cmpb $0, ready
16913- jz 1f /* Initial CPU cleans BSS */
16914- jmp checkCPUtype
16915-1:
16916+ jnz checkCPUtype
16917 #endif /* CONFIG_SMP */
16918
16919 /*
16920@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
16921 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16922 movl %eax,%ss # after changing gdt.
16923
16924- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16925+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16926 movl %eax,%ds
16927 movl %eax,%es
16928
16929@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
16930 */
16931 cmpb $0,ready
16932 jne 1f
16933- movl $per_cpu__gdt_page,%eax
16934+ movl $cpu_gdt_table,%eax
16935 movl $per_cpu__stack_canary,%ecx
16936+#ifdef CONFIG_SMP
16937+ addl $__per_cpu_load,%ecx
16938+#endif
16939 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16940 shrl $16, %ecx
16941 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16942 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16943 1:
16944-#endif
16945 movl $(__KERNEL_STACK_CANARY),%eax
16946+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16947+ movl $(__USER_DS),%eax
16948+#else
16949+ xorl %eax,%eax
16950+#endif
16951 movl %eax,%gs
16952
16953 xorl %eax,%eax # Clear LDT
16954@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
16955
16956 cld # gcc2 wants the direction flag cleared at all times
16957 pushl $0 # fake return address for unwinder
16958-#ifdef CONFIG_SMP
16959- movb ready, %cl
16960 movb $1, ready
16961- cmpb $0,%cl # the first CPU calls start_kernel
16962- je 1f
16963- movl (stack_start), %esp
16964-1:
16965-#endif /* CONFIG_SMP */
16966 jmp *(initial_code)
16967
16968 /*
16969@@ -546,22 +631,22 @@ early_page_fault:
16970 jmp early_fault
16971
16972 early_fault:
16973- cld
16974 #ifdef CONFIG_PRINTK
16975+ cmpl $1,%ss:early_recursion_flag
16976+ je hlt_loop
16977+ incl %ss:early_recursion_flag
16978+ cld
16979 pusha
16980 movl $(__KERNEL_DS),%eax
16981 movl %eax,%ds
16982 movl %eax,%es
16983- cmpl $2,early_recursion_flag
16984- je hlt_loop
16985- incl early_recursion_flag
16986 movl %cr2,%eax
16987 pushl %eax
16988 pushl %edx /* trapno */
16989 pushl $fault_msg
16990 call printk
16991+; call dump_stack
16992 #endif
16993- call dump_stack
16994 hlt_loop:
16995 hlt
16996 jmp hlt_loop
16997@@ -569,8 +654,11 @@ hlt_loop:
16998 /* This is the default interrupt "handler" :-) */
16999 ALIGN
17000 ignore_int:
17001- cld
17002 #ifdef CONFIG_PRINTK
17003+ cmpl $2,%ss:early_recursion_flag
17004+ je hlt_loop
17005+ incl %ss:early_recursion_flag
17006+ cld
17007 pushl %eax
17008 pushl %ecx
17009 pushl %edx
17010@@ -579,9 +667,6 @@ ignore_int:
17011 movl $(__KERNEL_DS),%eax
17012 movl %eax,%ds
17013 movl %eax,%es
17014- cmpl $2,early_recursion_flag
17015- je hlt_loop
17016- incl early_recursion_flag
17017 pushl 16(%esp)
17018 pushl 24(%esp)
17019 pushl 32(%esp)
17020@@ -600,6 +685,8 @@ ignore_int:
17021 #endif
17022 iret
17023
17024+#include "verify_cpu.S"
17025+
17026 __REFDATA
17027 .align 4
17028 ENTRY(initial_code)
17029@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17030 /*
17031 * BSS section
17032 */
17033-__PAGE_ALIGNED_BSS
17034- .align PAGE_SIZE_asm
17035 #ifdef CONFIG_X86_PAE
17036+.section .swapper_pg_pmd,"a",@progbits
17037 swapper_pg_pmd:
17038 .fill 1024*KPMDS,4,0
17039 #else
17040+.section .swapper_pg_dir,"a",@progbits
17041 ENTRY(swapper_pg_dir)
17042 .fill 1024,4,0
17043 #endif
17044+.section .swapper_pg_fixmap,"a",@progbits
17045 swapper_pg_fixmap:
17046 .fill 1024,4,0
17047 #ifdef CONFIG_X86_TRAMPOLINE
17048+.section .trampoline_pg_dir,"a",@progbits
17049 ENTRY(trampoline_pg_dir)
17050+#ifdef CONFIG_X86_PAE
17051+ .fill 4,8,0
17052+#else
17053 .fill 1024,4,0
17054 #endif
17055+#endif
17056+
17057+.section .empty_zero_page,"a",@progbits
17058 ENTRY(empty_zero_page)
17059 .fill 4096,1,0
17060
17061 /*
17062+ * The IDT has to be page-aligned to simplify the Pentium
17063+ * F0 0F bug workaround.. We have a special link segment
17064+ * for this.
17065+ */
17066+.section .idt,"a",@progbits
17067+ENTRY(idt_table)
17068+ .fill 256,8,0
17069+
17070+/*
17071 * This starts the data section.
17072 */
17073 #ifdef CONFIG_X86_PAE
17074-__PAGE_ALIGNED_DATA
17075- /* Page-aligned for the benefit of paravirt? */
17076- .align PAGE_SIZE_asm
17077+.section .swapper_pg_dir,"a",@progbits
17078+
17079 ENTRY(swapper_pg_dir)
17080 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17081 # if KPMDS == 3
17082@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17083 # error "Kernel PMDs should be 1, 2 or 3"
17084 # endif
17085 .align PAGE_SIZE_asm /* needs to be page-sized too */
17086+
17087+#ifdef CONFIG_PAX_PER_CPU_PGD
17088+ENTRY(cpu_pgd)
17089+ .rept NR_CPUS
17090+ .fill 4,8,0
17091+ .endr
17092+#endif
17093+
17094 #endif
17095
17096 .data
17097+.balign 4
17098 ENTRY(stack_start)
17099- .long init_thread_union+THREAD_SIZE
17100- .long __BOOT_DS
17101+ .long init_thread_union+THREAD_SIZE-8
17102
17103 ready: .byte 0
17104
17105+.section .rodata,"a",@progbits
17106 early_recursion_flag:
17107 .long 0
17108
17109@@ -697,7 +809,7 @@ fault_msg:
17110 .word 0 # 32 bit align gdt_desc.address
17111 boot_gdt_descr:
17112 .word __BOOT_DS+7
17113- .long boot_gdt - __PAGE_OFFSET
17114+ .long pa(boot_gdt)
17115
17116 .word 0 # 32-bit align idt_desc.address
17117 idt_descr:
17118@@ -708,7 +820,7 @@ idt_descr:
17119 .word 0 # 32 bit align gdt_desc.address
17120 ENTRY(early_gdt_descr)
17121 .word GDT_ENTRIES*8-1
17122- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17123+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17124
17125 /*
17126 * The boot_gdt must mirror the equivalent in setup.S and is
17127@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17128 .align L1_CACHE_BYTES
17129 ENTRY(boot_gdt)
17130 .fill GDT_ENTRY_BOOT_CS,8,0
17131- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17132- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17133+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17134+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17135+
17136+ .align PAGE_SIZE_asm
17137+ENTRY(cpu_gdt_table)
17138+ .rept NR_CPUS
17139+ .quad 0x0000000000000000 /* NULL descriptor */
17140+ .quad 0x0000000000000000 /* 0x0b reserved */
17141+ .quad 0x0000000000000000 /* 0x13 reserved */
17142+ .quad 0x0000000000000000 /* 0x1b reserved */
17143+
17144+#ifdef CONFIG_PAX_KERNEXEC
17145+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17146+#else
17147+ .quad 0x0000000000000000 /* 0x20 unused */
17148+#endif
17149+
17150+ .quad 0x0000000000000000 /* 0x28 unused */
17151+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17152+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17153+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17154+ .quad 0x0000000000000000 /* 0x4b reserved */
17155+ .quad 0x0000000000000000 /* 0x53 reserved */
17156+ .quad 0x0000000000000000 /* 0x5b reserved */
17157+
17158+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17159+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17160+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17161+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17162+
17163+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17164+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17165+
17166+ /*
17167+ * Segments used for calling PnP BIOS have byte granularity.
17168+ * The code segments and data segments have fixed 64k limits,
17169+ * the transfer segment sizes are set at run time.
17170+ */
17171+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17172+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17173+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17174+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17175+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17176+
17177+ /*
17178+ * The APM segments have byte granularity and their bases
17179+ * are set at run time. All have 64k limits.
17180+ */
17181+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17182+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17183+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17184+
17185+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17186+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17187+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17188+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17189+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17190+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17191+
17192+ /* Be sure this is zeroed to avoid false validations in Xen */
17193+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17194+ .endr
17195diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17196index 780cd92..564ca35 100644
17197--- a/arch/x86/kernel/head_64.S
17198+++ b/arch/x86/kernel/head_64.S
17199@@ -19,6 +19,8 @@
17200 #include <asm/cache.h>
17201 #include <asm/processor-flags.h>
17202 #include <asm/percpu.h>
17203+#include <asm/cpufeature.h>
17204+#include <asm/alternative-asm.h>
17205
17206 #ifdef CONFIG_PARAVIRT
17207 #include <asm/asm-offsets.h>
17208@@ -38,6 +40,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17209 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17210 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17211 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17212+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17213+L3_VMALLOC_START = pud_index(VMALLOC_START)
17214+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17215+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17216
17217 .text
17218 __HEAD
17219@@ -85,35 +91,22 @@ startup_64:
17220 */
17221 addq %rbp, init_level4_pgt + 0(%rip)
17222 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17223+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17224+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17225 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17226
17227 addq %rbp, level3_ident_pgt + 0(%rip)
17228+#ifndef CONFIG_XEN
17229+ addq %rbp, level3_ident_pgt + 8(%rip)
17230+#endif
17231
17232- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17233- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17234-
17235- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17236-
17237- /* Add an Identity mapping if I am above 1G */
17238- leaq _text(%rip), %rdi
17239- andq $PMD_PAGE_MASK, %rdi
17240-
17241- movq %rdi, %rax
17242- shrq $PUD_SHIFT, %rax
17243- andq $(PTRS_PER_PUD - 1), %rax
17244- jz ident_complete
17245+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17246
17247- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17248- leaq level3_ident_pgt(%rip), %rbx
17249- movq %rdx, 0(%rbx, %rax, 8)
17250+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17251+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17252
17253- movq %rdi, %rax
17254- shrq $PMD_SHIFT, %rax
17255- andq $(PTRS_PER_PMD - 1), %rax
17256- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17257- leaq level2_spare_pgt(%rip), %rbx
17258- movq %rdx, 0(%rbx, %rax, 8)
17259-ident_complete:
17260+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17261+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17262
17263 /*
17264 * Fixup the kernel text+data virtual addresses. Note that
17265@@ -161,8 +154,8 @@ ENTRY(secondary_startup_64)
17266 * after the boot processor executes this code.
17267 */
17268
17269- /* Enable PAE mode and PGE */
17270- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17271+ /* Enable PAE mode and PSE/PGE */
17272+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17273 movq %rax, %cr4
17274
17275 /* Setup early boot stage 4 level pagetables. */
17276@@ -184,9 +177,15 @@ ENTRY(secondary_startup_64)
17277 movl $MSR_EFER, %ecx
17278 rdmsr
17279 btsl $_EFER_SCE, %eax /* Enable System Call */
17280- btl $20,%edi /* No Execute supported? */
17281+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17282 jnc 1f
17283 btsl $_EFER_NX, %eax
17284+ leaq init_level4_pgt(%rip), %rdi
17285+#ifndef CONFIG_EFI
17286+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17287+#endif
17288+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17289+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17290 1: wrmsr /* Make changes effective */
17291
17292 /* Setup cr0 */
17293@@ -249,6 +248,7 @@ ENTRY(secondary_startup_64)
17294 * jump. In addition we need to ensure %cs is set so we make this
17295 * a far return.
17296 */
17297+ pax_set_fptr_mask
17298 movq initial_code(%rip),%rax
17299 pushq $0 # fake return address to stop unwinder
17300 pushq $__KERNEL_CS # set correct cs
17301@@ -262,16 +262,16 @@ ENTRY(secondary_startup_64)
17302 .quad x86_64_start_kernel
17303 ENTRY(initial_gs)
17304 .quad INIT_PER_CPU_VAR(irq_stack_union)
17305- __FINITDATA
17306
17307 ENTRY(stack_start)
17308 .quad init_thread_union+THREAD_SIZE-8
17309 .word 0
17310+ __FINITDATA
17311
17312 bad_address:
17313 jmp bad_address
17314
17315- .section ".init.text","ax"
17316+ __INIT
17317 #ifdef CONFIG_EARLY_PRINTK
17318 .globl early_idt_handlers
17319 early_idt_handlers:
17320@@ -316,18 +316,23 @@ ENTRY(early_idt_handler)
17321 #endif /* EARLY_PRINTK */
17322 1: hlt
17323 jmp 1b
17324+ .previous
17325
17326 #ifdef CONFIG_EARLY_PRINTK
17327+ __INITDATA
17328 early_recursion_flag:
17329 .long 0
17330+ .previous
17331
17332+ .section .rodata,"a",@progbits
17333 early_idt_msg:
17334 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17335 early_idt_ripmsg:
17336 .asciz "RIP %s\n"
17337-#endif /* CONFIG_EARLY_PRINTK */
17338 .previous
17339+#endif /* CONFIG_EARLY_PRINTK */
17340
17341+ .section .rodata,"a",@progbits
17342 #define NEXT_PAGE(name) \
17343 .balign PAGE_SIZE; \
17344 ENTRY(name)
17345@@ -350,13 +355,36 @@ NEXT_PAGE(init_level4_pgt)
17346 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17347 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17348 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17349+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17350+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
17351+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17352+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17353 .org init_level4_pgt + L4_START_KERNEL*8, 0
17354 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17355 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17356
17357+#ifdef CONFIG_PAX_PER_CPU_PGD
17358+NEXT_PAGE(cpu_pgd)
17359+ .rept NR_CPUS
17360+ .fill 512,8,0
17361+ .endr
17362+#endif
17363+
17364 NEXT_PAGE(level3_ident_pgt)
17365 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17366+#ifdef CONFIG_XEN
17367 .fill 511,8,0
17368+#else
17369+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17370+ .fill 510,8,0
17371+#endif
17372+
17373+NEXT_PAGE(level3_vmalloc_pgt)
17374+ .fill 512,8,0
17375+
17376+NEXT_PAGE(level3_vmemmap_pgt)
17377+ .fill L3_VMEMMAP_START,8,0
17378+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17379
17380 NEXT_PAGE(level3_kernel_pgt)
17381 .fill L3_START_KERNEL,8,0
17382@@ -364,20 +392,23 @@ NEXT_PAGE(level3_kernel_pgt)
17383 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17384 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17385
17386+NEXT_PAGE(level2_vmemmap_pgt)
17387+ .fill 512,8,0
17388+
17389 NEXT_PAGE(level2_fixmap_pgt)
17390- .fill 506,8,0
17391- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17392- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17393- .fill 5,8,0
17394+ .fill 507,8,0
17395+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17396+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17397+ .fill 4,8,0
17398
17399-NEXT_PAGE(level1_fixmap_pgt)
17400+NEXT_PAGE(level1_vsyscall_pgt)
17401 .fill 512,8,0
17402
17403-NEXT_PAGE(level2_ident_pgt)
17404- /* Since I easily can, map the first 1G.
17405+ /* Since I easily can, map the first 2G.
17406 * Don't set NX because code runs from these pages.
17407 */
17408- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17409+NEXT_PAGE(level2_ident_pgt)
17410+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17411
17412 NEXT_PAGE(level2_kernel_pgt)
17413 /*
17414@@ -390,33 +421,55 @@ NEXT_PAGE(level2_kernel_pgt)
17415 * If you want to increase this then increase MODULES_VADDR
17416 * too.)
17417 */
17418- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17419- KERNEL_IMAGE_SIZE/PMD_SIZE)
17420-
17421-NEXT_PAGE(level2_spare_pgt)
17422- .fill 512, 8, 0
17423+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17424
17425 #undef PMDS
17426 #undef NEXT_PAGE
17427
17428- .data
17429+ .align PAGE_SIZE
17430+ENTRY(cpu_gdt_table)
17431+ .rept NR_CPUS
17432+ .quad 0x0000000000000000 /* NULL descriptor */
17433+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17434+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17435+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17436+ .quad 0x00cffb000000ffff /* __USER32_CS */
17437+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17438+ .quad 0x00affb000000ffff /* __USER_CS */
17439+
17440+#ifdef CONFIG_PAX_KERNEXEC
17441+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17442+#else
17443+ .quad 0x0 /* unused */
17444+#endif
17445+
17446+ .quad 0,0 /* TSS */
17447+ .quad 0,0 /* LDT */
17448+ .quad 0,0,0 /* three TLS descriptors */
17449+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17450+ /* asm/segment.h:GDT_ENTRIES must match this */
17451+
17452+ /* zero the remaining page */
17453+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17454+ .endr
17455+
17456 .align 16
17457 .globl early_gdt_descr
17458 early_gdt_descr:
17459 .word GDT_ENTRIES*8-1
17460 early_gdt_descr_base:
17461- .quad INIT_PER_CPU_VAR(gdt_page)
17462+ .quad cpu_gdt_table
17463
17464 ENTRY(phys_base)
17465 /* This must match the first entry in level2_kernel_pgt */
17466 .quad 0x0000000000000000
17467
17468 #include "../../x86/xen/xen-head.S"
17469-
17470- .section .bss, "aw", @nobits
17471+
17472+ .section .rodata,"a",@progbits
17473 .align L1_CACHE_BYTES
17474 ENTRY(idt_table)
17475- .skip IDT_ENTRIES * 16
17476+ .fill 512,8,0
17477
17478 __PAGE_ALIGNED_BSS
17479 .align PAGE_SIZE
17480diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17481index 9c3bd4a..e1d9b35 100644
17482--- a/arch/x86/kernel/i386_ksyms_32.c
17483+++ b/arch/x86/kernel/i386_ksyms_32.c
17484@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17485 EXPORT_SYMBOL(cmpxchg8b_emu);
17486 #endif
17487
17488+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17489+
17490 /* Networking helper routines. */
17491 EXPORT_SYMBOL(csum_partial_copy_generic);
17492+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17493+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17494
17495 EXPORT_SYMBOL(__get_user_1);
17496 EXPORT_SYMBOL(__get_user_2);
17497@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17498
17499 EXPORT_SYMBOL(csum_partial);
17500 EXPORT_SYMBOL(empty_zero_page);
17501+
17502+#ifdef CONFIG_PAX_KERNEXEC
17503+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17504+#endif
17505diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17506index df89102..a244320 100644
17507--- a/arch/x86/kernel/i8259.c
17508+++ b/arch/x86/kernel/i8259.c
17509@@ -208,7 +208,7 @@ spurious_8259A_irq:
17510 "spurious 8259A interrupt: IRQ%d.\n", irq);
17511 spurious_irq_mask |= irqmask;
17512 }
17513- atomic_inc(&irq_err_count);
17514+ atomic_inc_unchecked(&irq_err_count);
17515 /*
17516 * Theoretically we do not have to handle this IRQ,
17517 * but in Linux this does not cause problems and is
17518diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17519index 3a54dcb..1c22348 100644
17520--- a/arch/x86/kernel/init_task.c
17521+++ b/arch/x86/kernel/init_task.c
17522@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17523 * way process stacks are handled. This is done by having a special
17524 * "init_task" linker map entry..
17525 */
17526-union thread_union init_thread_union __init_task_data =
17527- { INIT_THREAD_INFO(init_task) };
17528+union thread_union init_thread_union __init_task_data;
17529
17530 /*
17531 * Initial task structure.
17532@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17533 * section. Since TSS's are completely CPU-local, we want them
17534 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17535 */
17536-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17537-
17538+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17539+EXPORT_SYMBOL(init_tss);
17540diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17541index 99c4d30..74c84e9 100644
17542--- a/arch/x86/kernel/ioport.c
17543+++ b/arch/x86/kernel/ioport.c
17544@@ -6,6 +6,7 @@
17545 #include <linux/sched.h>
17546 #include <linux/kernel.h>
17547 #include <linux/capability.h>
17548+#include <linux/security.h>
17549 #include <linux/errno.h>
17550 #include <linux/types.h>
17551 #include <linux/ioport.h>
17552@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17553
17554 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17555 return -EINVAL;
17556+#ifdef CONFIG_GRKERNSEC_IO
17557+ if (turn_on && grsec_disable_privio) {
17558+ gr_handle_ioperm();
17559+ return -EPERM;
17560+ }
17561+#endif
17562 if (turn_on && !capable(CAP_SYS_RAWIO))
17563 return -EPERM;
17564
17565@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17566 * because the ->io_bitmap_max value must match the bitmap
17567 * contents:
17568 */
17569- tss = &per_cpu(init_tss, get_cpu());
17570+ tss = init_tss + get_cpu();
17571
17572 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17573
17574@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17575 return -EINVAL;
17576 /* Trying to gain more privileges? */
17577 if (level > old) {
17578+#ifdef CONFIG_GRKERNSEC_IO
17579+ if (grsec_disable_privio) {
17580+ gr_handle_iopl();
17581+ return -EPERM;
17582+ }
17583+#endif
17584 if (!capable(CAP_SYS_RAWIO))
17585 return -EPERM;
17586 }
17587diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17588index 04bbd52..83a07d9 100644
17589--- a/arch/x86/kernel/irq.c
17590+++ b/arch/x86/kernel/irq.c
17591@@ -15,7 +15,7 @@
17592 #include <asm/mce.h>
17593 #include <asm/hw_irq.h>
17594
17595-atomic_t irq_err_count;
17596+atomic_unchecked_t irq_err_count;
17597
17598 /* Function pointer for generic interrupt vector handling */
17599 void (*generic_interrupt_extension)(void) = NULL;
17600@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17601 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17602 seq_printf(p, " Machine check polls\n");
17603 #endif
17604- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17605+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17606 #if defined(CONFIG_X86_IO_APIC)
17607- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17608+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17609 #endif
17610 return 0;
17611 }
17612@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17613
17614 u64 arch_irq_stat(void)
17615 {
17616- u64 sum = atomic_read(&irq_err_count);
17617+ u64 sum = atomic_read_unchecked(&irq_err_count);
17618
17619 #ifdef CONFIG_X86_IO_APIC
17620- sum += atomic_read(&irq_mis_count);
17621+ sum += atomic_read_unchecked(&irq_mis_count);
17622 #endif
17623 return sum;
17624 }
17625diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17626index 7d35d0f..03f1d52 100644
17627--- a/arch/x86/kernel/irq_32.c
17628+++ b/arch/x86/kernel/irq_32.c
17629@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17630 __asm__ __volatile__("andl %%esp,%0" :
17631 "=r" (sp) : "0" (THREAD_SIZE - 1));
17632
17633- return sp < (sizeof(struct thread_info) + STACK_WARN);
17634+ return sp < STACK_WARN;
17635 }
17636
17637 static void print_stack_overflow(void)
17638@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17639 * per-CPU IRQ handling contexts (thread information and stack)
17640 */
17641 union irq_ctx {
17642- struct thread_info tinfo;
17643- u32 stack[THREAD_SIZE/sizeof(u32)];
17644-} __attribute__((aligned(PAGE_SIZE)));
17645+ unsigned long previous_esp;
17646+ u32 stack[THREAD_SIZE/sizeof(u32)];
17647+} __attribute__((aligned(THREAD_SIZE)));
17648
17649 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17650 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17651@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17652 static inline int
17653 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17654 {
17655- union irq_ctx *curctx, *irqctx;
17656+ union irq_ctx *irqctx;
17657 u32 *isp, arg1, arg2;
17658
17659- curctx = (union irq_ctx *) current_thread_info();
17660 irqctx = __get_cpu_var(hardirq_ctx);
17661
17662 /*
17663@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17664 * handler) we can't do that and just have to keep using the
17665 * current stack (which is the irq stack already after all)
17666 */
17667- if (unlikely(curctx == irqctx))
17668+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17669 return 0;
17670
17671 /* build the stack frame on the IRQ stack */
17672- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17673- irqctx->tinfo.task = curctx->tinfo.task;
17674- irqctx->tinfo.previous_esp = current_stack_pointer;
17675+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17676+ irqctx->previous_esp = current_stack_pointer;
17677
17678- /*
17679- * Copy the softirq bits in preempt_count so that the
17680- * softirq checks work in the hardirq context.
17681- */
17682- irqctx->tinfo.preempt_count =
17683- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17684- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17685+#ifdef CONFIG_PAX_MEMORY_UDEREF
17686+ __set_fs(MAKE_MM_SEG(0));
17687+#endif
17688
17689 if (unlikely(overflow))
17690 call_on_stack(print_stack_overflow, isp);
17691@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17692 : "0" (irq), "1" (desc), "2" (isp),
17693 "D" (desc->handle_irq)
17694 : "memory", "cc", "ecx");
17695+
17696+#ifdef CONFIG_PAX_MEMORY_UDEREF
17697+ __set_fs(current_thread_info()->addr_limit);
17698+#endif
17699+
17700 return 1;
17701 }
17702
17703@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17704 */
17705 void __cpuinit irq_ctx_init(int cpu)
17706 {
17707- union irq_ctx *irqctx;
17708-
17709 if (per_cpu(hardirq_ctx, cpu))
17710 return;
17711
17712- irqctx = &per_cpu(hardirq_stack, cpu);
17713- irqctx->tinfo.task = NULL;
17714- irqctx->tinfo.exec_domain = NULL;
17715- irqctx->tinfo.cpu = cpu;
17716- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17717- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17718-
17719- per_cpu(hardirq_ctx, cpu) = irqctx;
17720-
17721- irqctx = &per_cpu(softirq_stack, cpu);
17722- irqctx->tinfo.task = NULL;
17723- irqctx->tinfo.exec_domain = NULL;
17724- irqctx->tinfo.cpu = cpu;
17725- irqctx->tinfo.preempt_count = 0;
17726- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17727-
17728- per_cpu(softirq_ctx, cpu) = irqctx;
17729+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17730+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17731
17732 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17733 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17734@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17735 asmlinkage void do_softirq(void)
17736 {
17737 unsigned long flags;
17738- struct thread_info *curctx;
17739 union irq_ctx *irqctx;
17740 u32 *isp;
17741
17742@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17743 local_irq_save(flags);
17744
17745 if (local_softirq_pending()) {
17746- curctx = current_thread_info();
17747 irqctx = __get_cpu_var(softirq_ctx);
17748- irqctx->tinfo.task = curctx->task;
17749- irqctx->tinfo.previous_esp = current_stack_pointer;
17750+ irqctx->previous_esp = current_stack_pointer;
17751
17752 /* build the stack frame on the softirq stack */
17753- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17754+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17755+
17756+#ifdef CONFIG_PAX_MEMORY_UDEREF
17757+ __set_fs(MAKE_MM_SEG(0));
17758+#endif
17759
17760 call_on_stack(__do_softirq, isp);
17761+
17762+#ifdef CONFIG_PAX_MEMORY_UDEREF
17763+ __set_fs(current_thread_info()->addr_limit);
17764+#endif
17765+
17766 /*
17767 * Shouldnt happen, we returned above if in_interrupt():
17768 */
17769diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17770index 8d82a77..0baf312 100644
17771--- a/arch/x86/kernel/kgdb.c
17772+++ b/arch/x86/kernel/kgdb.c
17773@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17774
17775 /* clear the trace bit */
17776 linux_regs->flags &= ~X86_EFLAGS_TF;
17777- atomic_set(&kgdb_cpu_doing_single_step, -1);
17778+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17779
17780 /* set the trace bit if we're stepping */
17781 if (remcomInBuffer[0] == 's') {
17782 linux_regs->flags |= X86_EFLAGS_TF;
17783 kgdb_single_step = 1;
17784- atomic_set(&kgdb_cpu_doing_single_step,
17785+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17786 raw_smp_processor_id());
17787 }
17788
17789@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17790 break;
17791
17792 case DIE_DEBUG:
17793- if (atomic_read(&kgdb_cpu_doing_single_step) ==
17794+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17795 raw_smp_processor_id()) {
17796 if (user_mode(regs))
17797 return single_step_cont(regs, args);
17798@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17799 return instruction_pointer(regs);
17800 }
17801
17802-struct kgdb_arch arch_kgdb_ops = {
17803+const struct kgdb_arch arch_kgdb_ops = {
17804 /* Breakpoint instruction: */
17805 .gdb_bpt_instr = { 0xcc },
17806 .flags = KGDB_HW_BREAKPOINT,
17807diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17808index 7a67820..8d15b75 100644
17809--- a/arch/x86/kernel/kprobes.c
17810+++ b/arch/x86/kernel/kprobes.c
17811@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17812 char op;
17813 s32 raddr;
17814 } __attribute__((packed)) * jop;
17815- jop = (struct __arch_jmp_op *)from;
17816+
17817+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17818+
17819+ pax_open_kernel();
17820 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17821 jop->op = RELATIVEJUMP_INSTRUCTION;
17822+ pax_close_kernel();
17823 }
17824
17825 /*
17826@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17827 kprobe_opcode_t opcode;
17828 kprobe_opcode_t *orig_opcodes = opcodes;
17829
17830- if (search_exception_tables((unsigned long)opcodes))
17831+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17832 return 0; /* Page fault may occur on this address. */
17833
17834 retry:
17835@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17836 disp = (u8 *) p->addr + *((s32 *) insn) -
17837 (u8 *) p->ainsn.insn;
17838 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17839+ pax_open_kernel();
17840 *(s32 *)insn = (s32) disp;
17841+ pax_close_kernel();
17842 }
17843 }
17844 #endif
17845@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17846
17847 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17848 {
17849- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17850+ pax_open_kernel();
17851+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17852+ pax_close_kernel();
17853
17854 fix_riprel(p);
17855
17856- if (can_boost(p->addr))
17857+ if (can_boost(ktla_ktva(p->addr)))
17858 p->ainsn.boostable = 0;
17859 else
17860 p->ainsn.boostable = -1;
17861
17862- p->opcode = *p->addr;
17863+ p->opcode = *(ktla_ktva(p->addr));
17864 }
17865
17866 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17867@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17868 if (p->opcode == BREAKPOINT_INSTRUCTION)
17869 regs->ip = (unsigned long)p->addr;
17870 else
17871- regs->ip = (unsigned long)p->ainsn.insn;
17872+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17873 }
17874
17875 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17876@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17877 if (p->ainsn.boostable == 1 && !p->post_handler) {
17878 /* Boost up -- we can execute copied instructions directly */
17879 reset_current_kprobe();
17880- regs->ip = (unsigned long)p->ainsn.insn;
17881+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17882 preempt_enable_no_resched();
17883 return;
17884 }
17885@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17886 struct kprobe_ctlblk *kcb;
17887
17888 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17889- if (*addr != BREAKPOINT_INSTRUCTION) {
17890+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17891 /*
17892 * The breakpoint instruction was removed right
17893 * after we hit it. Another cpu has removed
17894@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17895 /* Skip orig_ax, ip, cs */
17896 " addq $24, %rsp\n"
17897 " popfq\n"
17898+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17899+ " btsq $63,(%rsp)\n"
17900+#endif
17901 #else
17902 " pushf\n"
17903 /*
17904@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17905 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17906 {
17907 unsigned long *tos = stack_addr(regs);
17908- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17909+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17910 unsigned long orig_ip = (unsigned long)p->addr;
17911 kprobe_opcode_t *insn = p->ainsn.insn;
17912
17913@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17914 struct die_args *args = data;
17915 int ret = NOTIFY_DONE;
17916
17917- if (args->regs && user_mode_vm(args->regs))
17918+ if (args->regs && user_mode(args->regs))
17919 return ret;
17920
17921 switch (val) {
17922diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17923index 63b0ec8..6d92227 100644
17924--- a/arch/x86/kernel/kvm.c
17925+++ b/arch/x86/kernel/kvm.c
17926@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
17927 pv_mmu_ops.set_pud = kvm_set_pud;
17928 #if PAGETABLE_LEVELS == 4
17929 pv_mmu_ops.set_pgd = kvm_set_pgd;
17930+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
17931 #endif
17932 #endif
17933 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
17934diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17935index ec6ef60..ab2c824 100644
17936--- a/arch/x86/kernel/ldt.c
17937+++ b/arch/x86/kernel/ldt.c
17938@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17939 if (reload) {
17940 #ifdef CONFIG_SMP
17941 preempt_disable();
17942- load_LDT(pc);
17943+ load_LDT_nolock(pc);
17944 if (!cpumask_equal(mm_cpumask(current->mm),
17945 cpumask_of(smp_processor_id())))
17946 smp_call_function(flush_ldt, current->mm, 1);
17947 preempt_enable();
17948 #else
17949- load_LDT(pc);
17950+ load_LDT_nolock(pc);
17951 #endif
17952 }
17953 if (oldsize) {
17954@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17955 return err;
17956
17957 for (i = 0; i < old->size; i++)
17958- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17959+ write_ldt_entry(new->ldt, i, old->ldt + i);
17960 return 0;
17961 }
17962
17963@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17964 retval = copy_ldt(&mm->context, &old_mm->context);
17965 mutex_unlock(&old_mm->context.lock);
17966 }
17967+
17968+ if (tsk == current) {
17969+ mm->context.vdso = 0;
17970+
17971+#ifdef CONFIG_X86_32
17972+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17973+ mm->context.user_cs_base = 0UL;
17974+ mm->context.user_cs_limit = ~0UL;
17975+
17976+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17977+ cpus_clear(mm->context.cpu_user_cs_mask);
17978+#endif
17979+
17980+#endif
17981+#endif
17982+
17983+ }
17984+
17985 return retval;
17986 }
17987
17988@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17989 }
17990 }
17991
17992+#ifdef CONFIG_PAX_SEGMEXEC
17993+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17994+ error = -EINVAL;
17995+ goto out_unlock;
17996+ }
17997+#endif
17998+
17999 fill_ldt(&ldt, &ldt_info);
18000 if (oldmode)
18001 ldt.avl = 0;
18002diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18003index c1c429d..f02eaf9 100644
18004--- a/arch/x86/kernel/machine_kexec_32.c
18005+++ b/arch/x86/kernel/machine_kexec_32.c
18006@@ -26,7 +26,7 @@
18007 #include <asm/system.h>
18008 #include <asm/cacheflush.h>
18009
18010-static void set_idt(void *newidt, __u16 limit)
18011+static void set_idt(struct desc_struct *newidt, __u16 limit)
18012 {
18013 struct desc_ptr curidt;
18014
18015@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18016 }
18017
18018
18019-static void set_gdt(void *newgdt, __u16 limit)
18020+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18021 {
18022 struct desc_ptr curgdt;
18023
18024@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18025 }
18026
18027 control_page = page_address(image->control_code_page);
18028- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18029+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18030
18031 relocate_kernel_ptr = control_page;
18032 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18033diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18034index 1e47679..e73449d 100644
18035--- a/arch/x86/kernel/microcode_amd.c
18036+++ b/arch/x86/kernel/microcode_amd.c
18037@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18038 uci->mc = NULL;
18039 }
18040
18041-static struct microcode_ops microcode_amd_ops = {
18042+static const struct microcode_ops microcode_amd_ops = {
18043 .request_microcode_user = request_microcode_user,
18044 .request_microcode_fw = request_microcode_fw,
18045 .collect_cpu_info = collect_cpu_info_amd,
18046@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18047 .microcode_fini_cpu = microcode_fini_cpu_amd,
18048 };
18049
18050-struct microcode_ops * __init init_amd_microcode(void)
18051+const struct microcode_ops * __init init_amd_microcode(void)
18052 {
18053 return &microcode_amd_ops;
18054 }
18055diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18056index 378e9a8..b5a6ea9 100644
18057--- a/arch/x86/kernel/microcode_core.c
18058+++ b/arch/x86/kernel/microcode_core.c
18059@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18060
18061 #define MICROCODE_VERSION "2.00"
18062
18063-static struct microcode_ops *microcode_ops;
18064+static const struct microcode_ops *microcode_ops;
18065
18066 /*
18067 * Synchronization.
18068diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18069index 0d334dd..14cedaf 100644
18070--- a/arch/x86/kernel/microcode_intel.c
18071+++ b/arch/x86/kernel/microcode_intel.c
18072@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18073
18074 static int get_ucode_user(void *to, const void *from, size_t n)
18075 {
18076- return copy_from_user(to, from, n);
18077+ return copy_from_user(to, (const void __force_user *)from, n);
18078 }
18079
18080 static enum ucode_state
18081 request_microcode_user(int cpu, const void __user *buf, size_t size)
18082 {
18083- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18084+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18085 }
18086
18087 static void microcode_fini_cpu(int cpu)
18088@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18089 uci->mc = NULL;
18090 }
18091
18092-static struct microcode_ops microcode_intel_ops = {
18093+static const struct microcode_ops microcode_intel_ops = {
18094 .request_microcode_user = request_microcode_user,
18095 .request_microcode_fw = request_microcode_fw,
18096 .collect_cpu_info = collect_cpu_info,
18097@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18098 .microcode_fini_cpu = microcode_fini_cpu,
18099 };
18100
18101-struct microcode_ops * __init init_intel_microcode(void)
18102+const struct microcode_ops * __init init_intel_microcode(void)
18103 {
18104 return &microcode_intel_ops;
18105 }
18106diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18107index 89f386f..9028f51 100644
18108--- a/arch/x86/kernel/module.c
18109+++ b/arch/x86/kernel/module.c
18110@@ -34,7 +34,7 @@
18111 #define DEBUGP(fmt...)
18112 #endif
18113
18114-void *module_alloc(unsigned long size)
18115+static void *__module_alloc(unsigned long size, pgprot_t prot)
18116 {
18117 struct vm_struct *area;
18118
18119@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18120 if (!area)
18121 return NULL;
18122
18123- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18124- PAGE_KERNEL_EXEC);
18125+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18126+}
18127+
18128+void *module_alloc(unsigned long size)
18129+{
18130+
18131+#ifdef CONFIG_PAX_KERNEXEC
18132+ return __module_alloc(size, PAGE_KERNEL);
18133+#else
18134+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18135+#endif
18136+
18137 }
18138
18139 /* Free memory returned from module_alloc */
18140@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18141 vfree(module_region);
18142 }
18143
18144+#ifdef CONFIG_PAX_KERNEXEC
18145+#ifdef CONFIG_X86_32
18146+void *module_alloc_exec(unsigned long size)
18147+{
18148+ struct vm_struct *area;
18149+
18150+ if (size == 0)
18151+ return NULL;
18152+
18153+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18154+ return area ? area->addr : NULL;
18155+}
18156+EXPORT_SYMBOL(module_alloc_exec);
18157+
18158+void module_free_exec(struct module *mod, void *module_region)
18159+{
18160+ vunmap(module_region);
18161+}
18162+EXPORT_SYMBOL(module_free_exec);
18163+#else
18164+void module_free_exec(struct module *mod, void *module_region)
18165+{
18166+ module_free(mod, module_region);
18167+}
18168+EXPORT_SYMBOL(module_free_exec);
18169+
18170+void *module_alloc_exec(unsigned long size)
18171+{
18172+ return __module_alloc(size, PAGE_KERNEL_RX);
18173+}
18174+EXPORT_SYMBOL(module_alloc_exec);
18175+#endif
18176+#endif
18177+
18178 /* We don't need anything special. */
18179 int module_frob_arch_sections(Elf_Ehdr *hdr,
18180 Elf_Shdr *sechdrs,
18181@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18182 unsigned int i;
18183 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18184 Elf32_Sym *sym;
18185- uint32_t *location;
18186+ uint32_t *plocation, location;
18187
18188 DEBUGP("Applying relocate section %u to %u\n", relsec,
18189 sechdrs[relsec].sh_info);
18190 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18191 /* This is where to make the change */
18192- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18193- + rel[i].r_offset;
18194+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18195+ location = (uint32_t)plocation;
18196+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18197+ plocation = ktla_ktva((void *)plocation);
18198 /* This is the symbol it is referring to. Note that all
18199 undefined symbols have been resolved. */
18200 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18201@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18202 switch (ELF32_R_TYPE(rel[i].r_info)) {
18203 case R_386_32:
18204 /* We add the value into the location given */
18205- *location += sym->st_value;
18206+ pax_open_kernel();
18207+ *plocation += sym->st_value;
18208+ pax_close_kernel();
18209 break;
18210 case R_386_PC32:
18211 /* Add the value, subtract its postition */
18212- *location += sym->st_value - (uint32_t)location;
18213+ pax_open_kernel();
18214+ *plocation += sym->st_value - location;
18215+ pax_close_kernel();
18216 break;
18217 default:
18218 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18219@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18220 case R_X86_64_NONE:
18221 break;
18222 case R_X86_64_64:
18223+ pax_open_kernel();
18224 *(u64 *)loc = val;
18225+ pax_close_kernel();
18226 break;
18227 case R_X86_64_32:
18228+ pax_open_kernel();
18229 *(u32 *)loc = val;
18230+ pax_close_kernel();
18231 if (val != *(u32 *)loc)
18232 goto overflow;
18233 break;
18234 case R_X86_64_32S:
18235+ pax_open_kernel();
18236 *(s32 *)loc = val;
18237+ pax_close_kernel();
18238 if ((s64)val != *(s32 *)loc)
18239 goto overflow;
18240 break;
18241 case R_X86_64_PC32:
18242 val -= (u64)loc;
18243+ pax_open_kernel();
18244 *(u32 *)loc = val;
18245+ pax_close_kernel();
18246+
18247 #if 0
18248 if ((s64)val != *(s32 *)loc)
18249 goto overflow;
18250diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18251index 3a7c5a4..9191528 100644
18252--- a/arch/x86/kernel/paravirt-spinlocks.c
18253+++ b/arch/x86/kernel/paravirt-spinlocks.c
18254@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18255 __raw_spin_lock(lock);
18256 }
18257
18258-struct pv_lock_ops pv_lock_ops = {
18259+struct pv_lock_ops pv_lock_ops __read_only = {
18260 #ifdef CONFIG_SMP
18261 .spin_is_locked = __ticket_spin_is_locked,
18262 .spin_is_contended = __ticket_spin_is_contended,
18263diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18264index 1b1739d..dea6077 100644
18265--- a/arch/x86/kernel/paravirt.c
18266+++ b/arch/x86/kernel/paravirt.c
18267@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18268 {
18269 return x;
18270 }
18271+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18272+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18273+#endif
18274
18275 void __init default_banner(void)
18276 {
18277@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18278 * corresponding structure. */
18279 static void *get_call_destination(u8 type)
18280 {
18281- struct paravirt_patch_template tmpl = {
18282+ const struct paravirt_patch_template tmpl = {
18283 .pv_init_ops = pv_init_ops,
18284 .pv_time_ops = pv_time_ops,
18285 .pv_cpu_ops = pv_cpu_ops,
18286@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18287 .pv_lock_ops = pv_lock_ops,
18288 #endif
18289 };
18290+
18291+ pax_track_stack();
18292 return *((void **)&tmpl + type);
18293 }
18294
18295@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18296 if (opfunc == NULL)
18297 /* If there's no function, patch it with a ud2a (BUG) */
18298 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18299- else if (opfunc == _paravirt_nop)
18300+ else if (opfunc == (void *)_paravirt_nop)
18301 /* If the operation is a nop, then nop the callsite */
18302 ret = paravirt_patch_nop();
18303
18304 /* identity functions just return their single argument */
18305- else if (opfunc == _paravirt_ident_32)
18306+ else if (opfunc == (void *)_paravirt_ident_32)
18307 ret = paravirt_patch_ident_32(insnbuf, len);
18308- else if (opfunc == _paravirt_ident_64)
18309+ else if (opfunc == (void *)_paravirt_ident_64)
18310+ ret = paravirt_patch_ident_64(insnbuf, len);
18311+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18312+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18313 ret = paravirt_patch_ident_64(insnbuf, len);
18314+#endif
18315
18316 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18317 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18318@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18319 if (insn_len > len || start == NULL)
18320 insn_len = len;
18321 else
18322- memcpy(insnbuf, start, insn_len);
18323+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18324
18325 return insn_len;
18326 }
18327@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18328 preempt_enable();
18329 }
18330
18331-struct pv_info pv_info = {
18332+struct pv_info pv_info __read_only = {
18333 .name = "bare hardware",
18334 .paravirt_enabled = 0,
18335 .kernel_rpl = 0,
18336 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18337 };
18338
18339-struct pv_init_ops pv_init_ops = {
18340+struct pv_init_ops pv_init_ops __read_only = {
18341 .patch = native_patch,
18342 };
18343
18344-struct pv_time_ops pv_time_ops = {
18345+struct pv_time_ops pv_time_ops __read_only = {
18346 .sched_clock = native_sched_clock,
18347 };
18348
18349-struct pv_irq_ops pv_irq_ops = {
18350+struct pv_irq_ops pv_irq_ops __read_only = {
18351 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18352 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18353 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18354@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18355 #endif
18356 };
18357
18358-struct pv_cpu_ops pv_cpu_ops = {
18359+struct pv_cpu_ops pv_cpu_ops __read_only = {
18360 .cpuid = native_cpuid,
18361 .get_debugreg = native_get_debugreg,
18362 .set_debugreg = native_set_debugreg,
18363@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18364 .end_context_switch = paravirt_nop,
18365 };
18366
18367-struct pv_apic_ops pv_apic_ops = {
18368+struct pv_apic_ops pv_apic_ops __read_only = {
18369 #ifdef CONFIG_X86_LOCAL_APIC
18370 .startup_ipi_hook = paravirt_nop,
18371 #endif
18372 };
18373
18374-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18375+#ifdef CONFIG_X86_32
18376+#ifdef CONFIG_X86_PAE
18377+/* 64-bit pagetable entries */
18378+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18379+#else
18380 /* 32-bit pagetable entries */
18381 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18382+#endif
18383 #else
18384 /* 64-bit pagetable entries */
18385 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18386 #endif
18387
18388-struct pv_mmu_ops pv_mmu_ops = {
18389+struct pv_mmu_ops pv_mmu_ops __read_only = {
18390
18391 .read_cr2 = native_read_cr2,
18392 .write_cr2 = native_write_cr2,
18393@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18394 .make_pud = PTE_IDENT,
18395
18396 .set_pgd = native_set_pgd,
18397+ .set_pgd_batched = native_set_pgd_batched,
18398 #endif
18399 #endif /* PAGETABLE_LEVELS >= 3 */
18400
18401@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18402 },
18403
18404 .set_fixmap = native_set_fixmap,
18405+
18406+#ifdef CONFIG_PAX_KERNEXEC
18407+ .pax_open_kernel = native_pax_open_kernel,
18408+ .pax_close_kernel = native_pax_close_kernel,
18409+#endif
18410+
18411 };
18412
18413 EXPORT_SYMBOL_GPL(pv_time_ops);
18414diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18415index 1a2d4b1..6a0dd55 100644
18416--- a/arch/x86/kernel/pci-calgary_64.c
18417+++ b/arch/x86/kernel/pci-calgary_64.c
18418@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18419 free_pages((unsigned long)vaddr, get_order(size));
18420 }
18421
18422-static struct dma_map_ops calgary_dma_ops = {
18423+static const struct dma_map_ops calgary_dma_ops = {
18424 .alloc_coherent = calgary_alloc_coherent,
18425 .free_coherent = calgary_free_coherent,
18426 .map_sg = calgary_map_sg,
18427diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18428index 6ac3931..42b4414 100644
18429--- a/arch/x86/kernel/pci-dma.c
18430+++ b/arch/x86/kernel/pci-dma.c
18431@@ -14,7 +14,7 @@
18432
18433 static int forbid_dac __read_mostly;
18434
18435-struct dma_map_ops *dma_ops;
18436+const struct dma_map_ops *dma_ops;
18437 EXPORT_SYMBOL(dma_ops);
18438
18439 static int iommu_sac_force __read_mostly;
18440@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18441
18442 int dma_supported(struct device *dev, u64 mask)
18443 {
18444- struct dma_map_ops *ops = get_dma_ops(dev);
18445+ const struct dma_map_ops *ops = get_dma_ops(dev);
18446
18447 #ifdef CONFIG_PCI
18448 if (mask > 0xffffffff && forbid_dac > 0) {
18449diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18450index 1c76691..e3632db 100644
18451--- a/arch/x86/kernel/pci-gart_64.c
18452+++ b/arch/x86/kernel/pci-gart_64.c
18453@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18454 return -1;
18455 }
18456
18457-static struct dma_map_ops gart_dma_ops = {
18458+static const struct dma_map_ops gart_dma_ops = {
18459 .map_sg = gart_map_sg,
18460 .unmap_sg = gart_unmap_sg,
18461 .map_page = gart_map_page,
18462diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18463index a3933d4..c898869 100644
18464--- a/arch/x86/kernel/pci-nommu.c
18465+++ b/arch/x86/kernel/pci-nommu.c
18466@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18467 flush_write_buffers();
18468 }
18469
18470-struct dma_map_ops nommu_dma_ops = {
18471+const struct dma_map_ops nommu_dma_ops = {
18472 .alloc_coherent = dma_generic_alloc_coherent,
18473 .free_coherent = nommu_free_coherent,
18474 .map_sg = nommu_map_sg,
18475diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18476index aaa6b78..4de1881 100644
18477--- a/arch/x86/kernel/pci-swiotlb.c
18478+++ b/arch/x86/kernel/pci-swiotlb.c
18479@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18480 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18481 }
18482
18483-static struct dma_map_ops swiotlb_dma_ops = {
18484+static const struct dma_map_ops swiotlb_dma_ops = {
18485 .mapping_error = swiotlb_dma_mapping_error,
18486 .alloc_coherent = x86_swiotlb_alloc_coherent,
18487 .free_coherent = swiotlb_free_coherent,
18488diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18489index fc6c84d..0312ca2 100644
18490--- a/arch/x86/kernel/process.c
18491+++ b/arch/x86/kernel/process.c
18492@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18493
18494 void free_thread_info(struct thread_info *ti)
18495 {
18496- free_thread_xstate(ti->task);
18497 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18498 }
18499
18500+static struct kmem_cache *task_struct_cachep;
18501+
18502 void arch_task_cache_init(void)
18503 {
18504- task_xstate_cachep =
18505- kmem_cache_create("task_xstate", xstate_size,
18506+ /* create a slab on which task_structs can be allocated */
18507+ task_struct_cachep =
18508+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18509+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18510+
18511+ task_xstate_cachep =
18512+ kmem_cache_create("task_xstate", xstate_size,
18513 __alignof__(union thread_xstate),
18514- SLAB_PANIC | SLAB_NOTRACK, NULL);
18515+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18516+}
18517+
18518+struct task_struct *alloc_task_struct(void)
18519+{
18520+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18521+}
18522+
18523+void free_task_struct(struct task_struct *task)
18524+{
18525+ free_thread_xstate(task);
18526+ kmem_cache_free(task_struct_cachep, task);
18527 }
18528
18529 /*
18530@@ -73,7 +90,7 @@ void exit_thread(void)
18531 unsigned long *bp = t->io_bitmap_ptr;
18532
18533 if (bp) {
18534- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18535+ struct tss_struct *tss = init_tss + get_cpu();
18536
18537 t->io_bitmap_ptr = NULL;
18538 clear_thread_flag(TIF_IO_BITMAP);
18539@@ -93,6 +110,9 @@ void flush_thread(void)
18540
18541 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18542
18543+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18544+ loadsegment(gs, 0);
18545+#endif
18546 tsk->thread.debugreg0 = 0;
18547 tsk->thread.debugreg1 = 0;
18548 tsk->thread.debugreg2 = 0;
18549@@ -307,7 +327,7 @@ void default_idle(void)
18550 EXPORT_SYMBOL(default_idle);
18551 #endif
18552
18553-void stop_this_cpu(void *dummy)
18554+__noreturn void stop_this_cpu(void *dummy)
18555 {
18556 local_irq_disable();
18557 /*
18558@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18559 }
18560 early_param("idle", idle_setup);
18561
18562-unsigned long arch_align_stack(unsigned long sp)
18563+#ifdef CONFIG_PAX_RANDKSTACK
18564+void pax_randomize_kstack(struct pt_regs *regs)
18565 {
18566- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18567- sp -= get_random_int() % 8192;
18568- return sp & ~0xf;
18569-}
18570+ struct thread_struct *thread = &current->thread;
18571+ unsigned long time;
18572
18573-unsigned long arch_randomize_brk(struct mm_struct *mm)
18574-{
18575- unsigned long range_end = mm->brk + 0x02000000;
18576- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18577+ if (!randomize_va_space)
18578+ return;
18579+
18580+ if (v8086_mode(regs))
18581+ return;
18582+
18583+ rdtscl(time);
18584+
18585+ /* P4 seems to return a 0 LSB, ignore it */
18586+#ifdef CONFIG_MPENTIUM4
18587+ time &= 0x3EUL;
18588+ time <<= 2;
18589+#elif defined(CONFIG_X86_64)
18590+ time &= 0xFUL;
18591+ time <<= 4;
18592+#else
18593+ time &= 0x1FUL;
18594+ time <<= 3;
18595+#endif
18596+
18597+ thread->sp0 ^= time;
18598+ load_sp0(init_tss + smp_processor_id(), thread);
18599+
18600+#ifdef CONFIG_X86_64
18601+ percpu_write(kernel_stack, thread->sp0);
18602+#endif
18603 }
18604+#endif
18605
18606diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18607index c40c432..6e1df72 100644
18608--- a/arch/x86/kernel/process_32.c
18609+++ b/arch/x86/kernel/process_32.c
18610@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18611 unsigned long thread_saved_pc(struct task_struct *tsk)
18612 {
18613 return ((unsigned long *)tsk->thread.sp)[3];
18614+//XXX return tsk->thread.eip;
18615 }
18616
18617 #ifndef CONFIG_SMP
18618@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18619 unsigned short ss, gs;
18620 const char *board;
18621
18622- if (user_mode_vm(regs)) {
18623+ if (user_mode(regs)) {
18624 sp = regs->sp;
18625 ss = regs->ss & 0xffff;
18626- gs = get_user_gs(regs);
18627 } else {
18628 sp = (unsigned long) (&regs->sp);
18629 savesegment(ss, ss);
18630- savesegment(gs, gs);
18631 }
18632+ gs = get_user_gs(regs);
18633
18634 printk("\n");
18635
18636@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18637 regs.bx = (unsigned long) fn;
18638 regs.dx = (unsigned long) arg;
18639
18640- regs.ds = __USER_DS;
18641- regs.es = __USER_DS;
18642+ regs.ds = __KERNEL_DS;
18643+ regs.es = __KERNEL_DS;
18644 regs.fs = __KERNEL_PERCPU;
18645- regs.gs = __KERNEL_STACK_CANARY;
18646+ savesegment(gs, regs.gs);
18647 regs.orig_ax = -1;
18648 regs.ip = (unsigned long) kernel_thread_helper;
18649 regs.cs = __KERNEL_CS | get_kernel_rpl();
18650@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18651 struct task_struct *tsk;
18652 int err;
18653
18654- childregs = task_pt_regs(p);
18655+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18656 *childregs = *regs;
18657 childregs->ax = 0;
18658 childregs->sp = sp;
18659
18660 p->thread.sp = (unsigned long) childregs;
18661 p->thread.sp0 = (unsigned long) (childregs+1);
18662+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18663
18664 p->thread.ip = (unsigned long) ret_from_fork;
18665
18666@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18667 struct thread_struct *prev = &prev_p->thread,
18668 *next = &next_p->thread;
18669 int cpu = smp_processor_id();
18670- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18671+ struct tss_struct *tss = init_tss + cpu;
18672 bool preload_fpu;
18673
18674 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18675@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18676 */
18677 lazy_save_gs(prev->gs);
18678
18679+#ifdef CONFIG_PAX_MEMORY_UDEREF
18680+ __set_fs(task_thread_info(next_p)->addr_limit);
18681+#endif
18682+
18683 /*
18684 * Load the per-thread Thread-Local Storage descriptor.
18685 */
18686@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18687 */
18688 arch_end_context_switch(next_p);
18689
18690+ percpu_write(current_task, next_p);
18691+ percpu_write(current_tinfo, &next_p->tinfo);
18692+
18693 if (preload_fpu)
18694 __math_state_restore();
18695
18696@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18697 if (prev->gs | next->gs)
18698 lazy_load_gs(next->gs);
18699
18700- percpu_write(current_task, next_p);
18701-
18702 return prev_p;
18703 }
18704
18705@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18706 } while (count++ < 16);
18707 return 0;
18708 }
18709-
18710diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18711index 39493bc..196816d 100644
18712--- a/arch/x86/kernel/process_64.c
18713+++ b/arch/x86/kernel/process_64.c
18714@@ -91,7 +91,7 @@ static void __exit_idle(void)
18715 void exit_idle(void)
18716 {
18717 /* idle loop has pid 0 */
18718- if (current->pid)
18719+ if (task_pid_nr(current))
18720 return;
18721 __exit_idle();
18722 }
18723@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18724 if (!board)
18725 board = "";
18726 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18727- current->pid, current->comm, print_tainted(),
18728+ task_pid_nr(current), current->comm, print_tainted(),
18729 init_utsname()->release,
18730 (int)strcspn(init_utsname()->version, " "),
18731 init_utsname()->version, board);
18732@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18733 struct pt_regs *childregs;
18734 struct task_struct *me = current;
18735
18736- childregs = ((struct pt_regs *)
18737- (THREAD_SIZE + task_stack_page(p))) - 1;
18738+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18739 *childregs = *regs;
18740
18741 childregs->ax = 0;
18742@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18743 p->thread.sp = (unsigned long) childregs;
18744 p->thread.sp0 = (unsigned long) (childregs+1);
18745 p->thread.usersp = me->thread.usersp;
18746+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18747
18748 set_tsk_thread_flag(p, TIF_FORK);
18749
18750@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18751 struct thread_struct *prev = &prev_p->thread;
18752 struct thread_struct *next = &next_p->thread;
18753 int cpu = smp_processor_id();
18754- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18755+ struct tss_struct *tss = init_tss + cpu;
18756 unsigned fsindex, gsindex;
18757 bool preload_fpu;
18758
18759@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18760 prev->usersp = percpu_read(old_rsp);
18761 percpu_write(old_rsp, next->usersp);
18762 percpu_write(current_task, next_p);
18763+ percpu_write(current_tinfo, &next_p->tinfo);
18764
18765- percpu_write(kernel_stack,
18766- (unsigned long)task_stack_page(next_p) +
18767- THREAD_SIZE - KERNEL_STACK_OFFSET);
18768+ percpu_write(kernel_stack, next->sp0);
18769
18770 /*
18771 * Now maybe reload the debug registers and handle I/O bitmaps
18772@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18773 if (!p || p == current || p->state == TASK_RUNNING)
18774 return 0;
18775 stack = (unsigned long)task_stack_page(p);
18776- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18777+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18778 return 0;
18779 fp = *(u64 *)(p->thread.sp);
18780 do {
18781- if (fp < (unsigned long)stack ||
18782- fp >= (unsigned long)stack+THREAD_SIZE)
18783+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18784 return 0;
18785 ip = *(u64 *)(fp+8);
18786 if (!in_sched_functions(ip))
18787diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18788index c06acdd..3f5fff5 100644
18789--- a/arch/x86/kernel/ptrace.c
18790+++ b/arch/x86/kernel/ptrace.c
18791@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18792 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18793 {
18794 int ret;
18795- unsigned long __user *datap = (unsigned long __user *)data;
18796+ unsigned long __user *datap = (__force unsigned long __user *)data;
18797
18798 switch (request) {
18799 /* read the word at location addr in the USER area. */
18800@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18801 if (addr < 0)
18802 return -EIO;
18803 ret = do_get_thread_area(child, addr,
18804- (struct user_desc __user *) data);
18805+ (__force struct user_desc __user *) data);
18806 break;
18807
18808 case PTRACE_SET_THREAD_AREA:
18809 if (addr < 0)
18810 return -EIO;
18811 ret = do_set_thread_area(child, addr,
18812- (struct user_desc __user *) data, 0);
18813+ (__force struct user_desc __user *) data, 0);
18814 break;
18815 #endif
18816
18817@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18818 #ifdef CONFIG_X86_PTRACE_BTS
18819 case PTRACE_BTS_CONFIG:
18820 ret = ptrace_bts_config
18821- (child, data, (struct ptrace_bts_config __user *)addr);
18822+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18823 break;
18824
18825 case PTRACE_BTS_STATUS:
18826 ret = ptrace_bts_status
18827- (child, data, (struct ptrace_bts_config __user *)addr);
18828+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18829 break;
18830
18831 case PTRACE_BTS_SIZE:
18832@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18833
18834 case PTRACE_BTS_GET:
18835 ret = ptrace_bts_read_record
18836- (child, data, (struct bts_struct __user *) addr);
18837+ (child, data, (__force struct bts_struct __user *) addr);
18838 break;
18839
18840 case PTRACE_BTS_CLEAR:
18841@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18842
18843 case PTRACE_BTS_DRAIN:
18844 ret = ptrace_bts_drain
18845- (child, data, (struct bts_struct __user *) addr);
18846+ (child, data, (__force struct bts_struct __user *) addr);
18847 break;
18848 #endif /* CONFIG_X86_PTRACE_BTS */
18849
18850@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18851 info.si_code = si_code;
18852
18853 /* User-mode ip? */
18854- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18855+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18856
18857 /* Send us the fake SIGTRAP */
18858 force_sig_info(SIGTRAP, &info, tsk);
18859@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18860 * We must return the syscall number to actually look up in the table.
18861 * This can be -1L to skip running any syscall at all.
18862 */
18863-asmregparm long syscall_trace_enter(struct pt_regs *regs)
18864+long syscall_trace_enter(struct pt_regs *regs)
18865 {
18866 long ret = 0;
18867
18868@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18869 return ret ?: regs->orig_ax;
18870 }
18871
18872-asmregparm void syscall_trace_leave(struct pt_regs *regs)
18873+void syscall_trace_leave(struct pt_regs *regs)
18874 {
18875 if (unlikely(current->audit_context))
18876 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18877diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18878index cf98100..e76e03d 100644
18879--- a/arch/x86/kernel/reboot.c
18880+++ b/arch/x86/kernel/reboot.c
18881@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18882 EXPORT_SYMBOL(pm_power_off);
18883
18884 static const struct desc_ptr no_idt = {};
18885-static int reboot_mode;
18886+static unsigned short reboot_mode;
18887 enum reboot_type reboot_type = BOOT_KBD;
18888 int reboot_force;
18889
18890@@ -292,12 +292,12 @@ core_initcall(reboot_init);
18891 controller to pulse the CPU reset line, which is more thorough, but
18892 doesn't work with at least one type of 486 motherboard. It is easy
18893 to stop this code working; hence the copious comments. */
18894-static const unsigned long long
18895-real_mode_gdt_entries [3] =
18896+static struct desc_struct
18897+real_mode_gdt_entries [3] __read_only =
18898 {
18899- 0x0000000000000000ULL, /* Null descriptor */
18900- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18901- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18902+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18903+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18904+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18905 };
18906
18907 static const struct desc_ptr
18908@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18909 * specified by the code and length parameters.
18910 * We assume that length will aways be less that 100!
18911 */
18912-void machine_real_restart(const unsigned char *code, int length)
18913+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
18914 {
18915 local_irq_disable();
18916
18917@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
18918 /* Remap the kernel at virtual address zero, as well as offset zero
18919 from the kernel segment. This assumes the kernel segment starts at
18920 virtual address PAGE_OFFSET. */
18921- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18922- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
18923+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18924+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
18925
18926 /*
18927 * Use `swapper_pg_dir' as our page directory.
18928@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
18929 boot)". This seems like a fairly standard thing that gets set by
18930 REBOOT.COM programs, and the previous reset routine did this
18931 too. */
18932- *((unsigned short *)0x472) = reboot_mode;
18933+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18934
18935 /* For the switch to real mode, copy some code to low memory. It has
18936 to be in the first 64k because it is running in 16-bit mode, and it
18937 has to have the same physical and virtual address, because it turns
18938 off paging. Copy it near the end of the first page, out of the way
18939 of BIOS variables. */
18940- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
18941- real_mode_switch, sizeof (real_mode_switch));
18942- memcpy((void *)(0x1000 - 100), code, length);
18943+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
18944+ memcpy(__va(0x1000 - 100), code, length);
18945
18946 /* Set up the IDT for real mode. */
18947 load_idt(&real_mode_idt);
18948@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
18949 __asm__ __volatile__ ("ljmp $0x0008,%0"
18950 :
18951 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
18952+ do { } while (1);
18953 }
18954 #ifdef CONFIG_APM_MODULE
18955 EXPORT_SYMBOL(machine_real_restart);
18956@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18957 {
18958 }
18959
18960-static void native_machine_emergency_restart(void)
18961+__noreturn static void native_machine_emergency_restart(void)
18962 {
18963 int i;
18964
18965@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
18966 #endif
18967 }
18968
18969-static void __machine_emergency_restart(int emergency)
18970+static __noreturn void __machine_emergency_restart(int emergency)
18971 {
18972 reboot_emergency = emergency;
18973 machine_ops.emergency_restart();
18974 }
18975
18976-static void native_machine_restart(char *__unused)
18977+static __noreturn void native_machine_restart(char *__unused)
18978 {
18979 printk("machine restart\n");
18980
18981@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
18982 __machine_emergency_restart(0);
18983 }
18984
18985-static void native_machine_halt(void)
18986+static __noreturn void native_machine_halt(void)
18987 {
18988 /* stop other cpus and apics */
18989 machine_shutdown();
18990@@ -685,7 +685,7 @@ static void native_machine_halt(void)
18991 stop_this_cpu(NULL);
18992 }
18993
18994-static void native_machine_power_off(void)
18995+__noreturn static void native_machine_power_off(void)
18996 {
18997 if (pm_power_off) {
18998 if (!reboot_force)
18999@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19000 }
19001 /* a fallback in case there is no PM info available */
19002 tboot_shutdown(TB_SHUTDOWN_HALT);
19003+ do { } while (1);
19004 }
19005
19006 struct machine_ops machine_ops = {
19007diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19008index 7a6f3b3..976a959 100644
19009--- a/arch/x86/kernel/relocate_kernel_64.S
19010+++ b/arch/x86/kernel/relocate_kernel_64.S
19011@@ -11,6 +11,7 @@
19012 #include <asm/kexec.h>
19013 #include <asm/processor-flags.h>
19014 #include <asm/pgtable_types.h>
19015+#include <asm/alternative-asm.h>
19016
19017 /*
19018 * Must be relocatable PIC code callable as a C function
19019@@ -167,6 +168,7 @@ identity_mapped:
19020 xorq %r14, %r14
19021 xorq %r15, %r15
19022
19023+ pax_force_retaddr 0, 1
19024 ret
19025
19026 1:
19027diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19028index 5449a26..0b6c759 100644
19029--- a/arch/x86/kernel/setup.c
19030+++ b/arch/x86/kernel/setup.c
19031@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19032
19033 if (!boot_params.hdr.root_flags)
19034 root_mountflags &= ~MS_RDONLY;
19035- init_mm.start_code = (unsigned long) _text;
19036- init_mm.end_code = (unsigned long) _etext;
19037+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19038+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19039 init_mm.end_data = (unsigned long) _edata;
19040 init_mm.brk = _brk_end;
19041
19042- code_resource.start = virt_to_phys(_text);
19043- code_resource.end = virt_to_phys(_etext)-1;
19044- data_resource.start = virt_to_phys(_etext);
19045+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19046+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19047+ data_resource.start = virt_to_phys(_sdata);
19048 data_resource.end = virt_to_phys(_edata)-1;
19049 bss_resource.start = virt_to_phys(&__bss_start);
19050 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19051diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19052index d559af9..524c6ad 100644
19053--- a/arch/x86/kernel/setup_percpu.c
19054+++ b/arch/x86/kernel/setup_percpu.c
19055@@ -25,19 +25,17 @@
19056 # define DBG(x...)
19057 #endif
19058
19059-DEFINE_PER_CPU(int, cpu_number);
19060+#ifdef CONFIG_SMP
19061+DEFINE_PER_CPU(unsigned int, cpu_number);
19062 EXPORT_PER_CPU_SYMBOL(cpu_number);
19063+#endif
19064
19065-#ifdef CONFIG_X86_64
19066 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19067-#else
19068-#define BOOT_PERCPU_OFFSET 0
19069-#endif
19070
19071 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19072 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19073
19074-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19075+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19076 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19077 };
19078 EXPORT_SYMBOL(__per_cpu_offset);
19079@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19080 {
19081 #ifdef CONFIG_X86_32
19082 struct desc_struct gdt;
19083+ unsigned long base = per_cpu_offset(cpu);
19084
19085- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19086- 0x2 | DESCTYPE_S, 0x8);
19087- gdt.s = 1;
19088+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19089+ 0x83 | DESCTYPE_S, 0xC);
19090 write_gdt_entry(get_cpu_gdt_table(cpu),
19091 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19092 #endif
19093@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19094 /* alrighty, percpu areas up and running */
19095 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19096 for_each_possible_cpu(cpu) {
19097+#ifdef CONFIG_CC_STACKPROTECTOR
19098+#ifdef CONFIG_X86_32
19099+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19100+#endif
19101+#endif
19102 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19103 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19104 per_cpu(cpu_number, cpu) = cpu;
19105@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19106 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19107 #endif
19108 #endif
19109+#ifdef CONFIG_CC_STACKPROTECTOR
19110+#ifdef CONFIG_X86_32
19111+ if (!cpu)
19112+ per_cpu(stack_canary.canary, cpu) = canary;
19113+#endif
19114+#endif
19115 /*
19116 * Up to this point, the boot CPU has been using .data.init
19117 * area. Reload any changed state for the boot CPU.
19118diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19119index 6a44a76..a9287a1 100644
19120--- a/arch/x86/kernel/signal.c
19121+++ b/arch/x86/kernel/signal.c
19122@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19123 * Align the stack pointer according to the i386 ABI,
19124 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19125 */
19126- sp = ((sp + 4) & -16ul) - 4;
19127+ sp = ((sp - 12) & -16ul) - 4;
19128 #else /* !CONFIG_X86_32 */
19129 sp = round_down(sp, 16) - 8;
19130 #endif
19131@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19132 * Return an always-bogus address instead so we will die with SIGSEGV.
19133 */
19134 if (onsigstack && !likely(on_sig_stack(sp)))
19135- return (void __user *)-1L;
19136+ return (__force void __user *)-1L;
19137
19138 /* save i387 state */
19139 if (used_math() && save_i387_xstate(*fpstate) < 0)
19140- return (void __user *)-1L;
19141+ return (__force void __user *)-1L;
19142
19143 return (void __user *)sp;
19144 }
19145@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19146 }
19147
19148 if (current->mm->context.vdso)
19149- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19150+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19151 else
19152- restorer = &frame->retcode;
19153+ restorer = (void __user *)&frame->retcode;
19154 if (ka->sa.sa_flags & SA_RESTORER)
19155 restorer = ka->sa.sa_restorer;
19156
19157@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19158 * reasons and because gdb uses it as a signature to notice
19159 * signal handler stack frames.
19160 */
19161- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19162+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19163
19164 if (err)
19165 return -EFAULT;
19166@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19167 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19168
19169 /* Set up to return from userspace. */
19170- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19171+ if (current->mm->context.vdso)
19172+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19173+ else
19174+ restorer = (void __user *)&frame->retcode;
19175 if (ka->sa.sa_flags & SA_RESTORER)
19176 restorer = ka->sa.sa_restorer;
19177 put_user_ex(restorer, &frame->pretcode);
19178@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19179 * reasons and because gdb uses it as a signature to notice
19180 * signal handler stack frames.
19181 */
19182- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19183+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19184 } put_user_catch(err);
19185
19186 if (err)
19187@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19188 int signr;
19189 sigset_t *oldset;
19190
19191+ pax_track_stack();
19192+
19193 /*
19194 * We want the common case to go fast, which is why we may in certain
19195 * cases get here from kernel mode. Just return without doing anything
19196@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19197 * X86_32: vm86 regs switched out by assembly code before reaching
19198 * here, so testing against kernel CS suffices.
19199 */
19200- if (!user_mode(regs))
19201+ if (!user_mode_novm(regs))
19202 return;
19203
19204 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19205diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19206index 7e8e905..64d5c32 100644
19207--- a/arch/x86/kernel/smpboot.c
19208+++ b/arch/x86/kernel/smpboot.c
19209@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19210 */
19211 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19212
19213-void cpu_hotplug_driver_lock()
19214+void cpu_hotplug_driver_lock(void)
19215 {
19216- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19217+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19218 }
19219
19220-void cpu_hotplug_driver_unlock()
19221+void cpu_hotplug_driver_unlock(void)
19222 {
19223- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19224+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19225 }
19226
19227 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19228@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19229 * target processor state.
19230 */
19231 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19232- (unsigned long)stack_start.sp);
19233+ stack_start);
19234
19235 /*
19236 * Run STARTUP IPI loop.
19237@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19238 set_idle_for_cpu(cpu, c_idle.idle);
19239 do_rest:
19240 per_cpu(current_task, cpu) = c_idle.idle;
19241+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19242 #ifdef CONFIG_X86_32
19243 /* Stack for startup_32 can be just as for start_secondary onwards */
19244 irq_ctx_init(cpu);
19245@@ -750,13 +751,15 @@ do_rest:
19246 #else
19247 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19248 initial_gs = per_cpu_offset(cpu);
19249- per_cpu(kernel_stack, cpu) =
19250- (unsigned long)task_stack_page(c_idle.idle) -
19251- KERNEL_STACK_OFFSET + THREAD_SIZE;
19252+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19253 #endif
19254+
19255+ pax_open_kernel();
19256 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19257+ pax_close_kernel();
19258+
19259 initial_code = (unsigned long)start_secondary;
19260- stack_start.sp = (void *) c_idle.idle->thread.sp;
19261+ stack_start = c_idle.idle->thread.sp;
19262
19263 /* start_ip had better be page-aligned! */
19264 start_ip = setup_trampoline();
19265@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19266
19267 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19268
19269+#ifdef CONFIG_PAX_PER_CPU_PGD
19270+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19271+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19272+ KERNEL_PGD_PTRS);
19273+#endif
19274+
19275 err = do_boot_cpu(apicid, cpu);
19276
19277 if (err) {
19278diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19279index 3149032..14f1053 100644
19280--- a/arch/x86/kernel/step.c
19281+++ b/arch/x86/kernel/step.c
19282@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19283 struct desc_struct *desc;
19284 unsigned long base;
19285
19286- seg &= ~7UL;
19287+ seg >>= 3;
19288
19289 mutex_lock(&child->mm->context.lock);
19290- if (unlikely((seg >> 3) >= child->mm->context.size))
19291+ if (unlikely(seg >= child->mm->context.size))
19292 addr = -1L; /* bogus selector, access would fault */
19293 else {
19294 desc = child->mm->context.ldt + seg;
19295@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19296 addr += base;
19297 }
19298 mutex_unlock(&child->mm->context.lock);
19299- }
19300+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19301+ addr = ktla_ktva(addr);
19302
19303 return addr;
19304 }
19305@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19306 unsigned char opcode[15];
19307 unsigned long addr = convert_ip_to_linear(child, regs);
19308
19309+ if (addr == -EINVAL)
19310+ return 0;
19311+
19312 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19313 for (i = 0; i < copied; i++) {
19314 switch (opcode[i]) {
19315@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19316
19317 #ifdef CONFIG_X86_64
19318 case 0x40 ... 0x4f:
19319- if (regs->cs != __USER_CS)
19320+ if ((regs->cs & 0xffff) != __USER_CS)
19321 /* 32-bit mode: register increment */
19322 return 0;
19323 /* 64-bit mode: REX prefix */
19324diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19325index dee1ff7..a397f7f 100644
19326--- a/arch/x86/kernel/sys_i386_32.c
19327+++ b/arch/x86/kernel/sys_i386_32.c
19328@@ -24,6 +24,21 @@
19329
19330 #include <asm/syscalls.h>
19331
19332+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19333+{
19334+ unsigned long pax_task_size = TASK_SIZE;
19335+
19336+#ifdef CONFIG_PAX_SEGMEXEC
19337+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19338+ pax_task_size = SEGMEXEC_TASK_SIZE;
19339+#endif
19340+
19341+ if (len > pax_task_size || addr > pax_task_size - len)
19342+ return -EINVAL;
19343+
19344+ return 0;
19345+}
19346+
19347 /*
19348 * Perform the select(nd, in, out, ex, tv) and mmap() system
19349 * calls. Linux/i386 didn't use to be able to handle more than
19350@@ -58,6 +73,212 @@ out:
19351 return err;
19352 }
19353
19354+unsigned long
19355+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19356+ unsigned long len, unsigned long pgoff, unsigned long flags)
19357+{
19358+ struct mm_struct *mm = current->mm;
19359+ struct vm_area_struct *vma;
19360+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19361+
19362+#ifdef CONFIG_PAX_SEGMEXEC
19363+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19364+ pax_task_size = SEGMEXEC_TASK_SIZE;
19365+#endif
19366+
19367+ pax_task_size -= PAGE_SIZE;
19368+
19369+ if (len > pax_task_size)
19370+ return -ENOMEM;
19371+
19372+ if (flags & MAP_FIXED)
19373+ return addr;
19374+
19375+#ifdef CONFIG_PAX_RANDMMAP
19376+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19377+#endif
19378+
19379+ if (addr) {
19380+ addr = PAGE_ALIGN(addr);
19381+ if (pax_task_size - len >= addr) {
19382+ vma = find_vma(mm, addr);
19383+ if (check_heap_stack_gap(vma, addr, len))
19384+ return addr;
19385+ }
19386+ }
19387+ if (len > mm->cached_hole_size) {
19388+ start_addr = addr = mm->free_area_cache;
19389+ } else {
19390+ start_addr = addr = mm->mmap_base;
19391+ mm->cached_hole_size = 0;
19392+ }
19393+
19394+#ifdef CONFIG_PAX_PAGEEXEC
19395+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19396+ start_addr = 0x00110000UL;
19397+
19398+#ifdef CONFIG_PAX_RANDMMAP
19399+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19400+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19401+#endif
19402+
19403+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19404+ start_addr = addr = mm->mmap_base;
19405+ else
19406+ addr = start_addr;
19407+ }
19408+#endif
19409+
19410+full_search:
19411+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19412+ /* At this point: (!vma || addr < vma->vm_end). */
19413+ if (pax_task_size - len < addr) {
19414+ /*
19415+ * Start a new search - just in case we missed
19416+ * some holes.
19417+ */
19418+ if (start_addr != mm->mmap_base) {
19419+ start_addr = addr = mm->mmap_base;
19420+ mm->cached_hole_size = 0;
19421+ goto full_search;
19422+ }
19423+ return -ENOMEM;
19424+ }
19425+ if (check_heap_stack_gap(vma, addr, len))
19426+ break;
19427+ if (addr + mm->cached_hole_size < vma->vm_start)
19428+ mm->cached_hole_size = vma->vm_start - addr;
19429+ addr = vma->vm_end;
19430+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19431+ start_addr = addr = mm->mmap_base;
19432+ mm->cached_hole_size = 0;
19433+ goto full_search;
19434+ }
19435+ }
19436+
19437+ /*
19438+ * Remember the place where we stopped the search:
19439+ */
19440+ mm->free_area_cache = addr + len;
19441+ return addr;
19442+}
19443+
19444+unsigned long
19445+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19446+ const unsigned long len, const unsigned long pgoff,
19447+ const unsigned long flags)
19448+{
19449+ struct vm_area_struct *vma;
19450+ struct mm_struct *mm = current->mm;
19451+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19452+
19453+#ifdef CONFIG_PAX_SEGMEXEC
19454+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19455+ pax_task_size = SEGMEXEC_TASK_SIZE;
19456+#endif
19457+
19458+ pax_task_size -= PAGE_SIZE;
19459+
19460+ /* requested length too big for entire address space */
19461+ if (len > pax_task_size)
19462+ return -ENOMEM;
19463+
19464+ if (flags & MAP_FIXED)
19465+ return addr;
19466+
19467+#ifdef CONFIG_PAX_PAGEEXEC
19468+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19469+ goto bottomup;
19470+#endif
19471+
19472+#ifdef CONFIG_PAX_RANDMMAP
19473+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19474+#endif
19475+
19476+ /* requesting a specific address */
19477+ if (addr) {
19478+ addr = PAGE_ALIGN(addr);
19479+ if (pax_task_size - len >= addr) {
19480+ vma = find_vma(mm, addr);
19481+ if (check_heap_stack_gap(vma, addr, len))
19482+ return addr;
19483+ }
19484+ }
19485+
19486+ /* check if free_area_cache is useful for us */
19487+ if (len <= mm->cached_hole_size) {
19488+ mm->cached_hole_size = 0;
19489+ mm->free_area_cache = mm->mmap_base;
19490+ }
19491+
19492+ /* either no address requested or can't fit in requested address hole */
19493+ addr = mm->free_area_cache;
19494+
19495+ /* make sure it can fit in the remaining address space */
19496+ if (addr > len) {
19497+ vma = find_vma(mm, addr-len);
19498+ if (check_heap_stack_gap(vma, addr - len, len))
19499+ /* remember the address as a hint for next time */
19500+ return (mm->free_area_cache = addr-len);
19501+ }
19502+
19503+ if (mm->mmap_base < len)
19504+ goto bottomup;
19505+
19506+ addr = mm->mmap_base-len;
19507+
19508+ do {
19509+ /*
19510+ * Lookup failure means no vma is above this address,
19511+ * else if new region fits below vma->vm_start,
19512+ * return with success:
19513+ */
19514+ vma = find_vma(mm, addr);
19515+ if (check_heap_stack_gap(vma, addr, len))
19516+ /* remember the address as a hint for next time */
19517+ return (mm->free_area_cache = addr);
19518+
19519+ /* remember the largest hole we saw so far */
19520+ if (addr + mm->cached_hole_size < vma->vm_start)
19521+ mm->cached_hole_size = vma->vm_start - addr;
19522+
19523+ /* try just below the current vma->vm_start */
19524+ addr = skip_heap_stack_gap(vma, len);
19525+ } while (!IS_ERR_VALUE(addr));
19526+
19527+bottomup:
19528+ /*
19529+ * A failed mmap() very likely causes application failure,
19530+ * so fall back to the bottom-up function here. This scenario
19531+ * can happen with large stack limits and large mmap()
19532+ * allocations.
19533+ */
19534+
19535+#ifdef CONFIG_PAX_SEGMEXEC
19536+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19537+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19538+ else
19539+#endif
19540+
19541+ mm->mmap_base = TASK_UNMAPPED_BASE;
19542+
19543+#ifdef CONFIG_PAX_RANDMMAP
19544+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19545+ mm->mmap_base += mm->delta_mmap;
19546+#endif
19547+
19548+ mm->free_area_cache = mm->mmap_base;
19549+ mm->cached_hole_size = ~0UL;
19550+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19551+ /*
19552+ * Restore the topdown base:
19553+ */
19554+ mm->mmap_base = base;
19555+ mm->free_area_cache = base;
19556+ mm->cached_hole_size = ~0UL;
19557+
19558+ return addr;
19559+}
19560
19561 struct sel_arg_struct {
19562 unsigned long n;
19563@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19564 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19565 case SEMTIMEDOP:
19566 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19567- (const struct timespec __user *)fifth);
19568+ (__force const struct timespec __user *)fifth);
19569
19570 case SEMGET:
19571 return sys_semget(first, second, third);
19572@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19573 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19574 if (ret)
19575 return ret;
19576- return put_user(raddr, (ulong __user *) third);
19577+ return put_user(raddr, (__force ulong __user *) third);
19578 }
19579 case 1: /* iBCS2 emulator entry point */
19580 if (!segment_eq(get_fs(), get_ds()))
19581@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19582
19583 return error;
19584 }
19585-
19586-
19587-/*
19588- * Do a system call from kernel instead of calling sys_execve so we
19589- * end up with proper pt_regs.
19590- */
19591-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19592-{
19593- long __res;
19594- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19595- : "=a" (__res)
19596- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19597- return __res;
19598-}
19599diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19600index 8aa2057..b604bc1 100644
19601--- a/arch/x86/kernel/sys_x86_64.c
19602+++ b/arch/x86/kernel/sys_x86_64.c
19603@@ -32,8 +32,8 @@ out:
19604 return error;
19605 }
19606
19607-static void find_start_end(unsigned long flags, unsigned long *begin,
19608- unsigned long *end)
19609+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19610+ unsigned long *begin, unsigned long *end)
19611 {
19612 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19613 unsigned long new_begin;
19614@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19615 *begin = new_begin;
19616 }
19617 } else {
19618- *begin = TASK_UNMAPPED_BASE;
19619+ *begin = mm->mmap_base;
19620 *end = TASK_SIZE;
19621 }
19622 }
19623@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19624 if (flags & MAP_FIXED)
19625 return addr;
19626
19627- find_start_end(flags, &begin, &end);
19628+ find_start_end(mm, flags, &begin, &end);
19629
19630 if (len > end)
19631 return -ENOMEM;
19632
19633+#ifdef CONFIG_PAX_RANDMMAP
19634+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19635+#endif
19636+
19637 if (addr) {
19638 addr = PAGE_ALIGN(addr);
19639 vma = find_vma(mm, addr);
19640- if (end - len >= addr &&
19641- (!vma || addr + len <= vma->vm_start))
19642+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19643 return addr;
19644 }
19645 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19646@@ -106,7 +109,7 @@ full_search:
19647 }
19648 return -ENOMEM;
19649 }
19650- if (!vma || addr + len <= vma->vm_start) {
19651+ if (check_heap_stack_gap(vma, addr, len)) {
19652 /*
19653 * Remember the place where we stopped the search:
19654 */
19655@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19656 {
19657 struct vm_area_struct *vma;
19658 struct mm_struct *mm = current->mm;
19659- unsigned long addr = addr0;
19660+ unsigned long base = mm->mmap_base, addr = addr0;
19661
19662 /* requested length too big for entire address space */
19663 if (len > TASK_SIZE)
19664@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19665 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19666 goto bottomup;
19667
19668+#ifdef CONFIG_PAX_RANDMMAP
19669+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19670+#endif
19671+
19672 /* requesting a specific address */
19673 if (addr) {
19674 addr = PAGE_ALIGN(addr);
19675- vma = find_vma(mm, addr);
19676- if (TASK_SIZE - len >= addr &&
19677- (!vma || addr + len <= vma->vm_start))
19678- return addr;
19679+ if (TASK_SIZE - len >= addr) {
19680+ vma = find_vma(mm, addr);
19681+ if (check_heap_stack_gap(vma, addr, len))
19682+ return addr;
19683+ }
19684 }
19685
19686 /* check if free_area_cache is useful for us */
19687@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19688 /* make sure it can fit in the remaining address space */
19689 if (addr > len) {
19690 vma = find_vma(mm, addr-len);
19691- if (!vma || addr <= vma->vm_start)
19692+ if (check_heap_stack_gap(vma, addr - len, len))
19693 /* remember the address as a hint for next time */
19694 return mm->free_area_cache = addr-len;
19695 }
19696@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19697 * return with success:
19698 */
19699 vma = find_vma(mm, addr);
19700- if (!vma || addr+len <= vma->vm_start)
19701+ if (check_heap_stack_gap(vma, addr, len))
19702 /* remember the address as a hint for next time */
19703 return mm->free_area_cache = addr;
19704
19705@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19706 mm->cached_hole_size = vma->vm_start - addr;
19707
19708 /* try just below the current vma->vm_start */
19709- addr = vma->vm_start-len;
19710- } while (len < vma->vm_start);
19711+ addr = skip_heap_stack_gap(vma, len);
19712+ } while (!IS_ERR_VALUE(addr));
19713
19714 bottomup:
19715 /*
19716@@ -198,13 +206,21 @@ bottomup:
19717 * can happen with large stack limits and large mmap()
19718 * allocations.
19719 */
19720+ mm->mmap_base = TASK_UNMAPPED_BASE;
19721+
19722+#ifdef CONFIG_PAX_RANDMMAP
19723+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19724+ mm->mmap_base += mm->delta_mmap;
19725+#endif
19726+
19727+ mm->free_area_cache = mm->mmap_base;
19728 mm->cached_hole_size = ~0UL;
19729- mm->free_area_cache = TASK_UNMAPPED_BASE;
19730 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19731 /*
19732 * Restore the topdown base:
19733 */
19734- mm->free_area_cache = mm->mmap_base;
19735+ mm->mmap_base = base;
19736+ mm->free_area_cache = base;
19737 mm->cached_hole_size = ~0UL;
19738
19739 return addr;
19740diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19741index 76d70a4..4c94a44 100644
19742--- a/arch/x86/kernel/syscall_table_32.S
19743+++ b/arch/x86/kernel/syscall_table_32.S
19744@@ -1,3 +1,4 @@
19745+.section .rodata,"a",@progbits
19746 ENTRY(sys_call_table)
19747 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19748 .long sys_exit
19749diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19750index 46b8277..3349d55 100644
19751--- a/arch/x86/kernel/tboot.c
19752+++ b/arch/x86/kernel/tboot.c
19753@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19754
19755 void tboot_shutdown(u32 shutdown_type)
19756 {
19757- void (*shutdown)(void);
19758+ void (* __noreturn shutdown)(void);
19759
19760 if (!tboot_enabled())
19761 return;
19762@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19763
19764 switch_to_tboot_pt();
19765
19766- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19767+ shutdown = (void *)tboot->shutdown_entry;
19768 shutdown();
19769
19770 /* should not reach here */
19771@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19772 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19773 }
19774
19775-static atomic_t ap_wfs_count;
19776+static atomic_unchecked_t ap_wfs_count;
19777
19778 static int tboot_wait_for_aps(int num_aps)
19779 {
19780@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19781 {
19782 switch (action) {
19783 case CPU_DYING:
19784- atomic_inc(&ap_wfs_count);
19785+ atomic_inc_unchecked(&ap_wfs_count);
19786 if (num_online_cpus() == 1)
19787- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19788+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19789 return NOTIFY_BAD;
19790 break;
19791 }
19792@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19793
19794 tboot_create_trampoline();
19795
19796- atomic_set(&ap_wfs_count, 0);
19797+ atomic_set_unchecked(&ap_wfs_count, 0);
19798 register_hotcpu_notifier(&tboot_cpu_notifier);
19799 return 0;
19800 }
19801diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19802index be25734..87fe232 100644
19803--- a/arch/x86/kernel/time.c
19804+++ b/arch/x86/kernel/time.c
19805@@ -26,17 +26,13 @@
19806 int timer_ack;
19807 #endif
19808
19809-#ifdef CONFIG_X86_64
19810-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19811-#endif
19812-
19813 unsigned long profile_pc(struct pt_regs *regs)
19814 {
19815 unsigned long pc = instruction_pointer(regs);
19816
19817- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19818+ if (!user_mode(regs) && in_lock_functions(pc)) {
19819 #ifdef CONFIG_FRAME_POINTER
19820- return *(unsigned long *)(regs->bp + sizeof(long));
19821+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19822 #else
19823 unsigned long *sp =
19824 (unsigned long *)kernel_stack_pointer(regs);
19825@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19826 * or above a saved flags. Eflags has bits 22-31 zero,
19827 * kernel addresses don't.
19828 */
19829+
19830+#ifdef CONFIG_PAX_KERNEXEC
19831+ return ktla_ktva(sp[0]);
19832+#else
19833 if (sp[0] >> 22)
19834 return sp[0];
19835 if (sp[1] >> 22)
19836 return sp[1];
19837 #endif
19838+
19839+#endif
19840 }
19841 return pc;
19842 }
19843diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19844index 6bb7b85..dd853e1 100644
19845--- a/arch/x86/kernel/tls.c
19846+++ b/arch/x86/kernel/tls.c
19847@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19848 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19849 return -EINVAL;
19850
19851+#ifdef CONFIG_PAX_SEGMEXEC
19852+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19853+ return -EINVAL;
19854+#endif
19855+
19856 set_tls_desc(p, idx, &info, 1);
19857
19858 return 0;
19859diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19860index 8508237..229b664 100644
19861--- a/arch/x86/kernel/trampoline_32.S
19862+++ b/arch/x86/kernel/trampoline_32.S
19863@@ -32,6 +32,12 @@
19864 #include <asm/segment.h>
19865 #include <asm/page_types.h>
19866
19867+#ifdef CONFIG_PAX_KERNEXEC
19868+#define ta(X) (X)
19869+#else
19870+#define ta(X) ((X) - __PAGE_OFFSET)
19871+#endif
19872+
19873 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19874 __CPUINITRODATA
19875 .code16
19876@@ -60,7 +66,7 @@ r_base = .
19877 inc %ax # protected mode (PE) bit
19878 lmsw %ax # into protected mode
19879 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19880- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19881+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19882
19883 # These need to be in the same 64K segment as the above;
19884 # hence we don't use the boot_gdt_descr defined in head.S
19885diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19886index 3af2dff..ba8aa49 100644
19887--- a/arch/x86/kernel/trampoline_64.S
19888+++ b/arch/x86/kernel/trampoline_64.S
19889@@ -91,7 +91,7 @@ startup_32:
19890 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19891 movl %eax, %ds
19892
19893- movl $X86_CR4_PAE, %eax
19894+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19895 movl %eax, %cr4 # Enable PAE mode
19896
19897 # Setup trampoline 4 level pagetables
19898@@ -127,7 +127,7 @@ startup_64:
19899 no_longmode:
19900 hlt
19901 jmp no_longmode
19902-#include "verify_cpu_64.S"
19903+#include "verify_cpu.S"
19904
19905 # Careful these need to be in the same 64K segment as the above;
19906 tidt:
19907@@ -138,7 +138,7 @@ tidt:
19908 # so the kernel can live anywhere
19909 .balign 4
19910 tgdt:
19911- .short tgdt_end - tgdt # gdt limit
19912+ .short tgdt_end - tgdt - 1 # gdt limit
19913 .long tgdt - r_base
19914 .short 0
19915 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19916diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19917index 7e37dce..ec3f8e5 100644
19918--- a/arch/x86/kernel/traps.c
19919+++ b/arch/x86/kernel/traps.c
19920@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
19921
19922 /* Do we ignore FPU interrupts ? */
19923 char ignore_fpu_irq;
19924-
19925-/*
19926- * The IDT has to be page-aligned to simplify the Pentium
19927- * F0 0F bug workaround.
19928- */
19929-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19930 #endif
19931
19932 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19933@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19934 static inline void
19935 die_if_kernel(const char *str, struct pt_regs *regs, long err)
19936 {
19937- if (!user_mode_vm(regs))
19938+ if (!user_mode(regs))
19939 die(str, regs, err);
19940 }
19941 #endif
19942
19943 static void __kprobes
19944-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19945+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19946 long error_code, siginfo_t *info)
19947 {
19948 struct task_struct *tsk = current;
19949
19950 #ifdef CONFIG_X86_32
19951- if (regs->flags & X86_VM_MASK) {
19952+ if (v8086_mode(regs)) {
19953 /*
19954 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19955 * On nmi (interrupt 2), do_trap should not be called.
19956@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19957 }
19958 #endif
19959
19960- if (!user_mode(regs))
19961+ if (!user_mode_novm(regs))
19962 goto kernel_trap;
19963
19964 #ifdef CONFIG_X86_32
19965@@ -158,7 +152,7 @@ trap_signal:
19966 printk_ratelimit()) {
19967 printk(KERN_INFO
19968 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19969- tsk->comm, tsk->pid, str,
19970+ tsk->comm, task_pid_nr(tsk), str,
19971 regs->ip, regs->sp, error_code);
19972 print_vma_addr(" in ", regs->ip);
19973 printk("\n");
19974@@ -175,8 +169,20 @@ kernel_trap:
19975 if (!fixup_exception(regs)) {
19976 tsk->thread.error_code = error_code;
19977 tsk->thread.trap_no = trapnr;
19978+
19979+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19980+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19981+ str = "PAX: suspicious stack segment fault";
19982+#endif
19983+
19984 die(str, regs, error_code);
19985 }
19986+
19987+#ifdef CONFIG_PAX_REFCOUNT
19988+ if (trapnr == 4)
19989+ pax_report_refcount_overflow(regs);
19990+#endif
19991+
19992 return;
19993
19994 #ifdef CONFIG_X86_32
19995@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19996 conditional_sti(regs);
19997
19998 #ifdef CONFIG_X86_32
19999- if (regs->flags & X86_VM_MASK)
20000+ if (v8086_mode(regs))
20001 goto gp_in_vm86;
20002 #endif
20003
20004 tsk = current;
20005- if (!user_mode(regs))
20006+ if (!user_mode_novm(regs))
20007 goto gp_in_kernel;
20008
20009+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20010+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20011+ struct mm_struct *mm = tsk->mm;
20012+ unsigned long limit;
20013+
20014+ down_write(&mm->mmap_sem);
20015+ limit = mm->context.user_cs_limit;
20016+ if (limit < TASK_SIZE) {
20017+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20018+ up_write(&mm->mmap_sem);
20019+ return;
20020+ }
20021+ up_write(&mm->mmap_sem);
20022+ }
20023+#endif
20024+
20025 tsk->thread.error_code = error_code;
20026 tsk->thread.trap_no = 13;
20027
20028@@ -305,6 +327,13 @@ gp_in_kernel:
20029 if (notify_die(DIE_GPF, "general protection fault", regs,
20030 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20031 return;
20032+
20033+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20034+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20035+ die("PAX: suspicious general protection fault", regs, error_code);
20036+ else
20037+#endif
20038+
20039 die("general protection fault", regs, error_code);
20040 }
20041
20042@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20043 dotraplinkage notrace __kprobes void
20044 do_nmi(struct pt_regs *regs, long error_code)
20045 {
20046+
20047+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20048+ if (!user_mode(regs)) {
20049+ unsigned long cs = regs->cs & 0xFFFF;
20050+ unsigned long ip = ktva_ktla(regs->ip);
20051+
20052+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20053+ regs->ip = ip;
20054+ }
20055+#endif
20056+
20057 nmi_enter();
20058
20059 inc_irq_stat(__nmi_count);
20060@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20061 }
20062
20063 #ifdef CONFIG_X86_32
20064- if (regs->flags & X86_VM_MASK)
20065+ if (v8086_mode(regs))
20066 goto debug_vm86;
20067 #endif
20068
20069@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20070 * kernel space (but re-enable TF when returning to user mode).
20071 */
20072 if (condition & DR_STEP) {
20073- if (!user_mode(regs))
20074+ if (!user_mode_novm(regs))
20075 goto clear_TF_reenable;
20076 }
20077
20078@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20079 * Handle strange cache flush from user space exception
20080 * in all other cases. This is undocumented behaviour.
20081 */
20082- if (regs->flags & X86_VM_MASK) {
20083+ if (v8086_mode(regs)) {
20084 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20085 return;
20086 }
20087@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20088 void __math_state_restore(void)
20089 {
20090 struct thread_info *thread = current_thread_info();
20091- struct task_struct *tsk = thread->task;
20092+ struct task_struct *tsk = current;
20093
20094 /*
20095 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20096@@ -825,8 +865,7 @@ void __math_state_restore(void)
20097 */
20098 asmlinkage void math_state_restore(void)
20099 {
20100- struct thread_info *thread = current_thread_info();
20101- struct task_struct *tsk = thread->task;
20102+ struct task_struct *tsk = current;
20103
20104 if (!tsk_used_math(tsk)) {
20105 local_irq_enable();
20106diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20107new file mode 100644
20108index 0000000..50c5edd
20109--- /dev/null
20110+++ b/arch/x86/kernel/verify_cpu.S
20111@@ -0,0 +1,140 @@
20112+/*
20113+ *
20114+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20115+ * code has been borrowed from boot/setup.S and was introduced by
20116+ * Andi Kleen.
20117+ *
20118+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20119+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20120+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20121+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20122+ *
20123+ * This source code is licensed under the GNU General Public License,
20124+ * Version 2. See the file COPYING for more details.
20125+ *
20126+ * This is a common code for verification whether CPU supports
20127+ * long mode and SSE or not. It is not called directly instead this
20128+ * file is included at various places and compiled in that context.
20129+ * This file is expected to run in 32bit code. Currently:
20130+ *
20131+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20132+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20133+ * arch/x86/kernel/head_32.S: processor startup
20134+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20135+ *
20136+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20137+ * 0: Success 1: Failure
20138+ *
20139+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20140+ *
20141+ * The caller needs to check for the error code and take the action
20142+ * appropriately. Either display a message or halt.
20143+ */
20144+
20145+#include <asm/cpufeature.h>
20146+#include <asm/msr-index.h>
20147+
20148+verify_cpu:
20149+ pushfl # Save caller passed flags
20150+ pushl $0 # Kill any dangerous flags
20151+ popfl
20152+
20153+ pushfl # standard way to check for cpuid
20154+ popl %eax
20155+ movl %eax,%ebx
20156+ xorl $0x200000,%eax
20157+ pushl %eax
20158+ popfl
20159+ pushfl
20160+ popl %eax
20161+ cmpl %eax,%ebx
20162+ jz verify_cpu_no_longmode # cpu has no cpuid
20163+
20164+ movl $0x0,%eax # See if cpuid 1 is implemented
20165+ cpuid
20166+ cmpl $0x1,%eax
20167+ jb verify_cpu_no_longmode # no cpuid 1
20168+
20169+ xor %di,%di
20170+ cmpl $0x68747541,%ebx # AuthenticAMD
20171+ jnz verify_cpu_noamd
20172+ cmpl $0x69746e65,%edx
20173+ jnz verify_cpu_noamd
20174+ cmpl $0x444d4163,%ecx
20175+ jnz verify_cpu_noamd
20176+ mov $1,%di # cpu is from AMD
20177+ jmp verify_cpu_check
20178+
20179+verify_cpu_noamd:
20180+ cmpl $0x756e6547,%ebx # GenuineIntel?
20181+ jnz verify_cpu_check
20182+ cmpl $0x49656e69,%edx
20183+ jnz verify_cpu_check
20184+ cmpl $0x6c65746e,%ecx
20185+ jnz verify_cpu_check
20186+
20187+ # only call IA32_MISC_ENABLE when:
20188+ # family > 6 || (family == 6 && model >= 0xd)
20189+ movl $0x1, %eax # check CPU family and model
20190+ cpuid
20191+ movl %eax, %ecx
20192+
20193+ andl $0x0ff00f00, %eax # mask family and extended family
20194+ shrl $8, %eax
20195+ cmpl $6, %eax
20196+ ja verify_cpu_clear_xd # family > 6, ok
20197+ jb verify_cpu_check # family < 6, skip
20198+
20199+ andl $0x000f00f0, %ecx # mask model and extended model
20200+ shrl $4, %ecx
20201+ cmpl $0xd, %ecx
20202+ jb verify_cpu_check # family == 6, model < 0xd, skip
20203+
20204+verify_cpu_clear_xd:
20205+ movl $MSR_IA32_MISC_ENABLE, %ecx
20206+ rdmsr
20207+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20208+ jnc verify_cpu_check # only write MSR if bit was changed
20209+ wrmsr
20210+
20211+verify_cpu_check:
20212+ movl $0x1,%eax # Does the cpu have what it takes
20213+ cpuid
20214+ andl $REQUIRED_MASK0,%edx
20215+ xorl $REQUIRED_MASK0,%edx
20216+ jnz verify_cpu_no_longmode
20217+
20218+ movl $0x80000000,%eax # See if extended cpuid is implemented
20219+ cpuid
20220+ cmpl $0x80000001,%eax
20221+ jb verify_cpu_no_longmode # no extended cpuid
20222+
20223+ movl $0x80000001,%eax # Does the cpu have what it takes
20224+ cpuid
20225+ andl $REQUIRED_MASK1,%edx
20226+ xorl $REQUIRED_MASK1,%edx
20227+ jnz verify_cpu_no_longmode
20228+
20229+verify_cpu_sse_test:
20230+ movl $1,%eax
20231+ cpuid
20232+ andl $SSE_MASK,%edx
20233+ cmpl $SSE_MASK,%edx
20234+ je verify_cpu_sse_ok
20235+ test %di,%di
20236+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20237+ movl $MSR_K7_HWCR,%ecx
20238+ rdmsr
20239+ btr $15,%eax # enable SSE
20240+ wrmsr
20241+ xor %di,%di # don't loop
20242+ jmp verify_cpu_sse_test # try again
20243+
20244+verify_cpu_no_longmode:
20245+ popfl # Restore caller passed flags
20246+ movl $1,%eax
20247+ ret
20248+verify_cpu_sse_ok:
20249+ popfl # Restore caller passed flags
20250+ xorl %eax, %eax
20251+ ret
20252diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20253deleted file mode 100644
20254index 45b6f8a..0000000
20255--- a/arch/x86/kernel/verify_cpu_64.S
20256+++ /dev/null
20257@@ -1,105 +0,0 @@
20258-/*
20259- *
20260- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20261- * code has been borrowed from boot/setup.S and was introduced by
20262- * Andi Kleen.
20263- *
20264- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20265- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20266- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20267- *
20268- * This source code is licensed under the GNU General Public License,
20269- * Version 2. See the file COPYING for more details.
20270- *
20271- * This is a common code for verification whether CPU supports
20272- * long mode and SSE or not. It is not called directly instead this
20273- * file is included at various places and compiled in that context.
20274- * Following are the current usage.
20275- *
20276- * This file is included by both 16bit and 32bit code.
20277- *
20278- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20279- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20280- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20281- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20282- *
20283- * verify_cpu, returns the status of cpu check in register %eax.
20284- * 0: Success 1: Failure
20285- *
20286- * The caller needs to check for the error code and take the action
20287- * appropriately. Either display a message or halt.
20288- */
20289-
20290-#include <asm/cpufeature.h>
20291-
20292-verify_cpu:
20293- pushfl # Save caller passed flags
20294- pushl $0 # Kill any dangerous flags
20295- popfl
20296-
20297- pushfl # standard way to check for cpuid
20298- popl %eax
20299- movl %eax,%ebx
20300- xorl $0x200000,%eax
20301- pushl %eax
20302- popfl
20303- pushfl
20304- popl %eax
20305- cmpl %eax,%ebx
20306- jz verify_cpu_no_longmode # cpu has no cpuid
20307-
20308- movl $0x0,%eax # See if cpuid 1 is implemented
20309- cpuid
20310- cmpl $0x1,%eax
20311- jb verify_cpu_no_longmode # no cpuid 1
20312-
20313- xor %di,%di
20314- cmpl $0x68747541,%ebx # AuthenticAMD
20315- jnz verify_cpu_noamd
20316- cmpl $0x69746e65,%edx
20317- jnz verify_cpu_noamd
20318- cmpl $0x444d4163,%ecx
20319- jnz verify_cpu_noamd
20320- mov $1,%di # cpu is from AMD
20321-
20322-verify_cpu_noamd:
20323- movl $0x1,%eax # Does the cpu have what it takes
20324- cpuid
20325- andl $REQUIRED_MASK0,%edx
20326- xorl $REQUIRED_MASK0,%edx
20327- jnz verify_cpu_no_longmode
20328-
20329- movl $0x80000000,%eax # See if extended cpuid is implemented
20330- cpuid
20331- cmpl $0x80000001,%eax
20332- jb verify_cpu_no_longmode # no extended cpuid
20333-
20334- movl $0x80000001,%eax # Does the cpu have what it takes
20335- cpuid
20336- andl $REQUIRED_MASK1,%edx
20337- xorl $REQUIRED_MASK1,%edx
20338- jnz verify_cpu_no_longmode
20339-
20340-verify_cpu_sse_test:
20341- movl $1,%eax
20342- cpuid
20343- andl $SSE_MASK,%edx
20344- cmpl $SSE_MASK,%edx
20345- je verify_cpu_sse_ok
20346- test %di,%di
20347- jz verify_cpu_no_longmode # only try to force SSE on AMD
20348- movl $0xc0010015,%ecx # HWCR
20349- rdmsr
20350- btr $15,%eax # enable SSE
20351- wrmsr
20352- xor %di,%di # don't loop
20353- jmp verify_cpu_sse_test # try again
20354-
20355-verify_cpu_no_longmode:
20356- popfl # Restore caller passed flags
20357- movl $1,%eax
20358- ret
20359-verify_cpu_sse_ok:
20360- popfl # Restore caller passed flags
20361- xorl %eax, %eax
20362- ret
20363diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20364index 9c4e625..c992817 100644
20365--- a/arch/x86/kernel/vm86_32.c
20366+++ b/arch/x86/kernel/vm86_32.c
20367@@ -41,6 +41,7 @@
20368 #include <linux/ptrace.h>
20369 #include <linux/audit.h>
20370 #include <linux/stddef.h>
20371+#include <linux/grsecurity.h>
20372
20373 #include <asm/uaccess.h>
20374 #include <asm/io.h>
20375@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20376 do_exit(SIGSEGV);
20377 }
20378
20379- tss = &per_cpu(init_tss, get_cpu());
20380+ tss = init_tss + get_cpu();
20381 current->thread.sp0 = current->thread.saved_sp0;
20382 current->thread.sysenter_cs = __KERNEL_CS;
20383 load_sp0(tss, &current->thread);
20384@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20385 struct task_struct *tsk;
20386 int tmp, ret = -EPERM;
20387
20388+#ifdef CONFIG_GRKERNSEC_VM86
20389+ if (!capable(CAP_SYS_RAWIO)) {
20390+ gr_handle_vm86();
20391+ goto out;
20392+ }
20393+#endif
20394+
20395 tsk = current;
20396 if (tsk->thread.saved_sp0)
20397 goto out;
20398@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20399 int tmp, ret;
20400 struct vm86plus_struct __user *v86;
20401
20402+#ifdef CONFIG_GRKERNSEC_VM86
20403+ if (!capable(CAP_SYS_RAWIO)) {
20404+ gr_handle_vm86();
20405+ ret = -EPERM;
20406+ goto out;
20407+ }
20408+#endif
20409+
20410 tsk = current;
20411 switch (regs->bx) {
20412 case VM86_REQUEST_IRQ:
20413@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20414 tsk->thread.saved_fs = info->regs32->fs;
20415 tsk->thread.saved_gs = get_user_gs(info->regs32);
20416
20417- tss = &per_cpu(init_tss, get_cpu());
20418+ tss = init_tss + get_cpu();
20419 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20420 if (cpu_has_sep)
20421 tsk->thread.sysenter_cs = 0;
20422@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20423 goto cannot_handle;
20424 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20425 goto cannot_handle;
20426- intr_ptr = (unsigned long __user *) (i << 2);
20427+ intr_ptr = (__force unsigned long __user *) (i << 2);
20428 if (get_user(segoffs, intr_ptr))
20429 goto cannot_handle;
20430 if ((segoffs >> 16) == BIOSSEG)
20431diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20432index d430e4c..831f817 100644
20433--- a/arch/x86/kernel/vmi_32.c
20434+++ b/arch/x86/kernel/vmi_32.c
20435@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20436 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20437
20438 #define call_vrom_func(rom,func) \
20439- (((VROMFUNC *)(rom->func))())
20440+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20441
20442 #define call_vrom_long_func(rom,func,arg) \
20443- (((VROMLONGFUNC *)(rom->func)) (arg))
20444-
20445-static struct vrom_header *vmi_rom;
20446+({\
20447+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20448+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20449+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20450+ __reloc;\
20451+})
20452+
20453+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20454 static int disable_pge;
20455 static int disable_pse;
20456 static int disable_sep;
20457@@ -76,10 +81,10 @@ static struct {
20458 void (*set_initial_ap_state)(int, int);
20459 void (*halt)(void);
20460 void (*set_lazy_mode)(int mode);
20461-} vmi_ops;
20462+} __no_const vmi_ops __read_only;
20463
20464 /* Cached VMI operations */
20465-struct vmi_timer_ops vmi_timer_ops;
20466+struct vmi_timer_ops vmi_timer_ops __read_only;
20467
20468 /*
20469 * VMI patching routines.
20470@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20471 static inline void patch_offset(void *insnbuf,
20472 unsigned long ip, unsigned long dest)
20473 {
20474- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20475+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20476 }
20477
20478 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20479@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20480 {
20481 u64 reloc;
20482 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20483+
20484 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20485 switch(rel->type) {
20486 case VMI_RELOCATION_CALL_REL:
20487@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20488
20489 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20490 {
20491- const pte_t pte = { .pte = 0 };
20492+ const pte_t pte = __pte(0ULL);
20493 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20494 }
20495
20496 static void vmi_pmd_clear(pmd_t *pmd)
20497 {
20498- const pte_t pte = { .pte = 0 };
20499+ const pte_t pte = __pte(0ULL);
20500 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20501 }
20502 #endif
20503@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20504 ap.ss = __KERNEL_DS;
20505 ap.esp = (unsigned long) start_esp;
20506
20507- ap.ds = __USER_DS;
20508- ap.es = __USER_DS;
20509+ ap.ds = __KERNEL_DS;
20510+ ap.es = __KERNEL_DS;
20511 ap.fs = __KERNEL_PERCPU;
20512- ap.gs = __KERNEL_STACK_CANARY;
20513+ savesegment(gs, ap.gs);
20514
20515 ap.eflags = 0;
20516
20517@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20518 paravirt_leave_lazy_mmu();
20519 }
20520
20521+#ifdef CONFIG_PAX_KERNEXEC
20522+static unsigned long vmi_pax_open_kernel(void)
20523+{
20524+ return 0;
20525+}
20526+
20527+static unsigned long vmi_pax_close_kernel(void)
20528+{
20529+ return 0;
20530+}
20531+#endif
20532+
20533 static inline int __init check_vmi_rom(struct vrom_header *rom)
20534 {
20535 struct pci_header *pci;
20536@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20537 return 0;
20538 if (rom->vrom_signature != VMI_SIGNATURE)
20539 return 0;
20540+ if (rom->rom_length * 512 > sizeof(*rom)) {
20541+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20542+ return 0;
20543+ }
20544 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20545 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20546 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20547@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20548 struct vrom_header *romstart;
20549 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20550 if (check_vmi_rom(romstart)) {
20551- vmi_rom = romstart;
20552+ vmi_rom = *romstart;
20553 return 1;
20554 }
20555 }
20556@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20557
20558 para_fill(pv_irq_ops.safe_halt, Halt);
20559
20560+#ifdef CONFIG_PAX_KERNEXEC
20561+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20562+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20563+#endif
20564+
20565 /*
20566 * Alternative instruction rewriting doesn't happen soon enough
20567 * to convert VMI_IRET to a call instead of a jump; so we have
20568@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20569
20570 void __init vmi_init(void)
20571 {
20572- if (!vmi_rom)
20573+ if (!vmi_rom.rom_signature)
20574 probe_vmi_rom();
20575 else
20576- check_vmi_rom(vmi_rom);
20577+ check_vmi_rom(&vmi_rom);
20578
20579 /* In case probing for or validating the ROM failed, basil */
20580- if (!vmi_rom)
20581+ if (!vmi_rom.rom_signature)
20582 return;
20583
20584- reserve_top_address(-vmi_rom->virtual_top);
20585+ reserve_top_address(-vmi_rom.virtual_top);
20586
20587 #ifdef CONFIG_X86_IO_APIC
20588 /* This is virtual hardware; timer routing is wired correctly */
20589@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20590 {
20591 unsigned long flags;
20592
20593- if (!vmi_rom)
20594+ if (!vmi_rom.rom_signature)
20595 return;
20596
20597 local_irq_save(flags);
20598diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20599index 3c68fe2..12c8280 100644
20600--- a/arch/x86/kernel/vmlinux.lds.S
20601+++ b/arch/x86/kernel/vmlinux.lds.S
20602@@ -26,6 +26,13 @@
20603 #include <asm/page_types.h>
20604 #include <asm/cache.h>
20605 #include <asm/boot.h>
20606+#include <asm/segment.h>
20607+
20608+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20609+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20610+#else
20611+#define __KERNEL_TEXT_OFFSET 0
20612+#endif
20613
20614 #undef i386 /* in case the preprocessor is a 32bit one */
20615
20616@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20617 #ifdef CONFIG_X86_32
20618 OUTPUT_ARCH(i386)
20619 ENTRY(phys_startup_32)
20620-jiffies = jiffies_64;
20621 #else
20622 OUTPUT_ARCH(i386:x86-64)
20623 ENTRY(phys_startup_64)
20624-jiffies_64 = jiffies;
20625 #endif
20626
20627 PHDRS {
20628 text PT_LOAD FLAGS(5); /* R_E */
20629- data PT_LOAD FLAGS(7); /* RWE */
20630+#ifdef CONFIG_X86_32
20631+ module PT_LOAD FLAGS(5); /* R_E */
20632+#endif
20633+#ifdef CONFIG_XEN
20634+ rodata PT_LOAD FLAGS(5); /* R_E */
20635+#else
20636+ rodata PT_LOAD FLAGS(4); /* R__ */
20637+#endif
20638+ data PT_LOAD FLAGS(6); /* RW_ */
20639 #ifdef CONFIG_X86_64
20640 user PT_LOAD FLAGS(5); /* R_E */
20641+#endif
20642+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20643 #ifdef CONFIG_SMP
20644 percpu PT_LOAD FLAGS(6); /* RW_ */
20645 #endif
20646+ text.init PT_LOAD FLAGS(5); /* R_E */
20647+ text.exit PT_LOAD FLAGS(5); /* R_E */
20648 init PT_LOAD FLAGS(7); /* RWE */
20649-#endif
20650 note PT_NOTE FLAGS(0); /* ___ */
20651 }
20652
20653 SECTIONS
20654 {
20655 #ifdef CONFIG_X86_32
20656- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20657- phys_startup_32 = startup_32 - LOAD_OFFSET;
20658+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20659 #else
20660- . = __START_KERNEL;
20661- phys_startup_64 = startup_64 - LOAD_OFFSET;
20662+ . = __START_KERNEL;
20663 #endif
20664
20665 /* Text and read-only data */
20666- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20667- _text = .;
20668+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20669 /* bootstrapping code */
20670+#ifdef CONFIG_X86_32
20671+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20672+#else
20673+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20674+#endif
20675+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20676+ _text = .;
20677 HEAD_TEXT
20678 #ifdef CONFIG_X86_32
20679 . = ALIGN(PAGE_SIZE);
20680@@ -82,28 +102,71 @@ SECTIONS
20681 IRQENTRY_TEXT
20682 *(.fixup)
20683 *(.gnu.warning)
20684- /* End of text section */
20685- _etext = .;
20686 } :text = 0x9090
20687
20688- NOTES :text :note
20689+ . += __KERNEL_TEXT_OFFSET;
20690+
20691+#ifdef CONFIG_X86_32
20692+ . = ALIGN(PAGE_SIZE);
20693+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20694+ *(.vmi.rom)
20695+ } :module
20696+
20697+ . = ALIGN(PAGE_SIZE);
20698+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20699+
20700+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20701+ MODULES_EXEC_VADDR = .;
20702+ BYTE(0)
20703+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20704+ . = ALIGN(HPAGE_SIZE);
20705+ MODULES_EXEC_END = . - 1;
20706+#endif
20707
20708- EXCEPTION_TABLE(16) :text = 0x9090
20709+ } :module
20710+#endif
20711+
20712+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20713+ /* End of text section */
20714+ _etext = . - __KERNEL_TEXT_OFFSET;
20715+ }
20716+
20717+#ifdef CONFIG_X86_32
20718+ . = ALIGN(PAGE_SIZE);
20719+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20720+ *(.idt)
20721+ . = ALIGN(PAGE_SIZE);
20722+ *(.empty_zero_page)
20723+ *(.swapper_pg_fixmap)
20724+ *(.swapper_pg_pmd)
20725+ *(.swapper_pg_dir)
20726+ *(.trampoline_pg_dir)
20727+ } :rodata
20728+#endif
20729+
20730+ . = ALIGN(PAGE_SIZE);
20731+ NOTES :rodata :note
20732+
20733+ EXCEPTION_TABLE(16) :rodata
20734
20735 RO_DATA(PAGE_SIZE)
20736
20737 /* Data */
20738 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20739+
20740+#ifdef CONFIG_PAX_KERNEXEC
20741+ . = ALIGN(HPAGE_SIZE);
20742+#else
20743+ . = ALIGN(PAGE_SIZE);
20744+#endif
20745+
20746 /* Start of data section */
20747 _sdata = .;
20748
20749 /* init_task */
20750 INIT_TASK_DATA(THREAD_SIZE)
20751
20752-#ifdef CONFIG_X86_32
20753- /* 32 bit has nosave before _edata */
20754 NOSAVE_DATA
20755-#endif
20756
20757 PAGE_ALIGNED_DATA(PAGE_SIZE)
20758
20759@@ -112,6 +175,8 @@ SECTIONS
20760 DATA_DATA
20761 CONSTRUCTORS
20762
20763+ jiffies = jiffies_64;
20764+
20765 /* rarely changed data like cpu maps */
20766 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20767
20768@@ -166,12 +231,6 @@ SECTIONS
20769 }
20770 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20771
20772- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20773- .jiffies : AT(VLOAD(.jiffies)) {
20774- *(.jiffies)
20775- }
20776- jiffies = VVIRT(.jiffies);
20777-
20778 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20779 *(.vsyscall_3)
20780 }
20781@@ -187,12 +246,19 @@ SECTIONS
20782 #endif /* CONFIG_X86_64 */
20783
20784 /* Init code and data - will be freed after init */
20785- . = ALIGN(PAGE_SIZE);
20786 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20787+ BYTE(0)
20788+
20789+#ifdef CONFIG_PAX_KERNEXEC
20790+ . = ALIGN(HPAGE_SIZE);
20791+#else
20792+ . = ALIGN(PAGE_SIZE);
20793+#endif
20794+
20795 __init_begin = .; /* paired with __init_end */
20796- }
20797+ } :init.begin
20798
20799-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20800+#ifdef CONFIG_SMP
20801 /*
20802 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20803 * output PHDR, so the next output section - .init.text - should
20804@@ -201,12 +267,27 @@ SECTIONS
20805 PERCPU_VADDR(0, :percpu)
20806 #endif
20807
20808- INIT_TEXT_SECTION(PAGE_SIZE)
20809-#ifdef CONFIG_X86_64
20810- :init
20811-#endif
20812+ . = ALIGN(PAGE_SIZE);
20813+ init_begin = .;
20814+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20815+ VMLINUX_SYMBOL(_sinittext) = .;
20816+ INIT_TEXT
20817+ VMLINUX_SYMBOL(_einittext) = .;
20818+ . = ALIGN(PAGE_SIZE);
20819+ } :text.init
20820
20821- INIT_DATA_SECTION(16)
20822+ /*
20823+ * .exit.text is discard at runtime, not link time, to deal with
20824+ * references from .altinstructions and .eh_frame
20825+ */
20826+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20827+ EXIT_TEXT
20828+ . = ALIGN(16);
20829+ } :text.exit
20830+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20831+
20832+ . = ALIGN(PAGE_SIZE);
20833+ INIT_DATA_SECTION(16) :init
20834
20835 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20836 __x86_cpu_dev_start = .;
20837@@ -232,19 +313,11 @@ SECTIONS
20838 *(.altinstr_replacement)
20839 }
20840
20841- /*
20842- * .exit.text is discard at runtime, not link time, to deal with
20843- * references from .altinstructions and .eh_frame
20844- */
20845- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20846- EXIT_TEXT
20847- }
20848-
20849 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20850 EXIT_DATA
20851 }
20852
20853-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20854+#ifndef CONFIG_SMP
20855 PERCPU(PAGE_SIZE)
20856 #endif
20857
20858@@ -267,12 +340,6 @@ SECTIONS
20859 . = ALIGN(PAGE_SIZE);
20860 }
20861
20862-#ifdef CONFIG_X86_64
20863- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20864- NOSAVE_DATA
20865- }
20866-#endif
20867-
20868 /* BSS */
20869 . = ALIGN(PAGE_SIZE);
20870 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20871@@ -288,6 +355,7 @@ SECTIONS
20872 __brk_base = .;
20873 . += 64 * 1024; /* 64k alignment slop space */
20874 *(.brk_reservation) /* areas brk users have reserved */
20875+ . = ALIGN(HPAGE_SIZE);
20876 __brk_limit = .;
20877 }
20878
20879@@ -316,13 +384,12 @@ SECTIONS
20880 * for the boot processor.
20881 */
20882 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20883-INIT_PER_CPU(gdt_page);
20884 INIT_PER_CPU(irq_stack_union);
20885
20886 /*
20887 * Build-time check on the image size:
20888 */
20889-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20890+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20891 "kernel image bigger than KERNEL_IMAGE_SIZE");
20892
20893 #ifdef CONFIG_SMP
20894diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20895index 62f39d7..3bc46a1 100644
20896--- a/arch/x86/kernel/vsyscall_64.c
20897+++ b/arch/x86/kernel/vsyscall_64.c
20898@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20899
20900 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20901 /* copy vsyscall data */
20902+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20903 vsyscall_gtod_data.clock.vread = clock->vread;
20904 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20905 vsyscall_gtod_data.clock.mask = clock->mask;
20906@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20907 We do this here because otherwise user space would do it on
20908 its own in a likely inferior way (no access to jiffies).
20909 If you don't like it pass NULL. */
20910- if (tcache && tcache->blob[0] == (j = __jiffies)) {
20911+ if (tcache && tcache->blob[0] == (j = jiffies)) {
20912 p = tcache->blob[1];
20913 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
20914 /* Load per CPU data from RDTSCP */
20915diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20916index 3909e3b..5433a97 100644
20917--- a/arch/x86/kernel/x8664_ksyms_64.c
20918+++ b/arch/x86/kernel/x8664_ksyms_64.c
20919@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
20920
20921 EXPORT_SYMBOL(copy_user_generic);
20922 EXPORT_SYMBOL(__copy_user_nocache);
20923-EXPORT_SYMBOL(copy_from_user);
20924-EXPORT_SYMBOL(copy_to_user);
20925 EXPORT_SYMBOL(__copy_from_user_inatomic);
20926
20927 EXPORT_SYMBOL(copy_page);
20928diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20929index c5ee17e..d63218f 100644
20930--- a/arch/x86/kernel/xsave.c
20931+++ b/arch/x86/kernel/xsave.c
20932@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20933 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20934 return -1;
20935
20936- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20937+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20938 fx_sw_user->extended_size -
20939 FP_XSTATE_MAGIC2_SIZE));
20940 /*
20941@@ -196,7 +196,7 @@ fx_only:
20942 * the other extended state.
20943 */
20944 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20945- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20946+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20947 }
20948
20949 /*
20950@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
20951 if (task_thread_info(tsk)->status & TS_XSAVE)
20952 err = restore_user_xstate(buf);
20953 else
20954- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20955+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
20956 buf);
20957 if (unlikely(err)) {
20958 /*
20959diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20960index 1350e43..a94b011 100644
20961--- a/arch/x86/kvm/emulate.c
20962+++ b/arch/x86/kvm/emulate.c
20963@@ -81,8 +81,8 @@
20964 #define Src2CL (1<<29)
20965 #define Src2ImmByte (2<<29)
20966 #define Src2One (3<<29)
20967-#define Src2Imm16 (4<<29)
20968-#define Src2Mask (7<<29)
20969+#define Src2Imm16 (4U<<29)
20970+#define Src2Mask (7U<<29)
20971
20972 enum {
20973 Group1_80, Group1_81, Group1_82, Group1_83,
20974@@ -411,6 +411,7 @@ static u32 group2_table[] = {
20975
20976 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
20977 do { \
20978+ unsigned long _tmp; \
20979 __asm__ __volatile__ ( \
20980 _PRE_EFLAGS("0", "4", "2") \
20981 _op _suffix " %"_x"3,%1; " \
20982@@ -424,8 +425,6 @@ static u32 group2_table[] = {
20983 /* Raw emulation: instruction has two explicit operands. */
20984 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
20985 do { \
20986- unsigned long _tmp; \
20987- \
20988 switch ((_dst).bytes) { \
20989 case 2: \
20990 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
20991@@ -441,7 +440,6 @@ static u32 group2_table[] = {
20992
20993 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20994 do { \
20995- unsigned long _tmp; \
20996 switch ((_dst).bytes) { \
20997 case 1: \
20998 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
20999diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21000index 8dfeaaa..4daa395 100644
21001--- a/arch/x86/kvm/lapic.c
21002+++ b/arch/x86/kvm/lapic.c
21003@@ -52,7 +52,7 @@
21004 #define APIC_BUS_CYCLE_NS 1
21005
21006 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21007-#define apic_debug(fmt, arg...)
21008+#define apic_debug(fmt, arg...) do {} while (0)
21009
21010 #define APIC_LVT_NUM 6
21011 /* 14 is the version for Xeon and Pentium 8.4.8*/
21012diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21013index 3bc2707..dd157e2 100644
21014--- a/arch/x86/kvm/paging_tmpl.h
21015+++ b/arch/x86/kvm/paging_tmpl.h
21016@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21017 int level = PT_PAGE_TABLE_LEVEL;
21018 unsigned long mmu_seq;
21019
21020+ pax_track_stack();
21021+
21022 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21023 kvm_mmu_audit(vcpu, "pre page fault");
21024
21025@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21026 kvm_mmu_free_some_pages(vcpu);
21027 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21028 level, &write_pt, pfn);
21029+ (void)sptep;
21030 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21031 sptep, *sptep, write_pt);
21032
21033diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21034index 7c6e63e..c5d92c1 100644
21035--- a/arch/x86/kvm/svm.c
21036+++ b/arch/x86/kvm/svm.c
21037@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21038 int cpu = raw_smp_processor_id();
21039
21040 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21041+
21042+ pax_open_kernel();
21043 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21044+ pax_close_kernel();
21045+
21046 load_TR_desc();
21047 }
21048
21049@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21050 return true;
21051 }
21052
21053-static struct kvm_x86_ops svm_x86_ops = {
21054+static const struct kvm_x86_ops svm_x86_ops = {
21055 .cpu_has_kvm_support = has_svm,
21056 .disabled_by_bios = is_disabled,
21057 .hardware_setup = svm_hardware_setup,
21058diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21059index e6d925f..e7a4af8 100644
21060--- a/arch/x86/kvm/vmx.c
21061+++ b/arch/x86/kvm/vmx.c
21062@@ -570,7 +570,11 @@ static void reload_tss(void)
21063
21064 kvm_get_gdt(&gdt);
21065 descs = (void *)gdt.base;
21066+
21067+ pax_open_kernel();
21068 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21069+ pax_close_kernel();
21070+
21071 load_TR_desc();
21072 }
21073
21074@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21075 if (!cpu_has_vmx_flexpriority())
21076 flexpriority_enabled = 0;
21077
21078- if (!cpu_has_vmx_tpr_shadow())
21079- kvm_x86_ops->update_cr8_intercept = NULL;
21080+ if (!cpu_has_vmx_tpr_shadow()) {
21081+ pax_open_kernel();
21082+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21083+ pax_close_kernel();
21084+ }
21085
21086 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21087 kvm_disable_largepages();
21088@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21089 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21090
21091 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21092- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21093+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21094 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21095 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21096 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21097@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21098 "jmp .Lkvm_vmx_return \n\t"
21099 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21100 ".Lkvm_vmx_return: "
21101+
21102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21103+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21104+ ".Lkvm_vmx_return2: "
21105+#endif
21106+
21107 /* Save guest registers, load host registers, keep flags */
21108 "xchg %0, (%%"R"sp) \n\t"
21109 "mov %%"R"ax, %c[rax](%0) \n\t"
21110@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21111 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21112 #endif
21113 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21114+
21115+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21116+ ,[cs]"i"(__KERNEL_CS)
21117+#endif
21118+
21119 : "cc", "memory"
21120- , R"bx", R"di", R"si"
21121+ , R"ax", R"bx", R"di", R"si"
21122 #ifdef CONFIG_X86_64
21123 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21124 #endif
21125@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21126 if (vmx->rmode.irq.pending)
21127 fixup_rmode_irq(vmx);
21128
21129- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21130+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21131+
21132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21133+ loadsegment(fs, __KERNEL_PERCPU);
21134+#endif
21135+
21136+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21137+ __set_fs(current_thread_info()->addr_limit);
21138+#endif
21139+
21140 vmx->launched = 1;
21141
21142 vmx_complete_interrupts(vmx);
21143@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21144 return false;
21145 }
21146
21147-static struct kvm_x86_ops vmx_x86_ops = {
21148+static const struct kvm_x86_ops vmx_x86_ops = {
21149 .cpu_has_kvm_support = cpu_has_kvm_support,
21150 .disabled_by_bios = vmx_disabled_by_bios,
21151 .hardware_setup = hardware_setup,
21152diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21153index df1cefb..5e882ad 100644
21154--- a/arch/x86/kvm/x86.c
21155+++ b/arch/x86/kvm/x86.c
21156@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21157 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21158 struct kvm_cpuid_entry2 __user *entries);
21159
21160-struct kvm_x86_ops *kvm_x86_ops;
21161+const struct kvm_x86_ops *kvm_x86_ops;
21162 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21163
21164 int ignore_msrs = 0;
21165@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21166 struct kvm_cpuid2 *cpuid,
21167 struct kvm_cpuid_entry2 __user *entries)
21168 {
21169- int r;
21170+ int r, i;
21171
21172 r = -E2BIG;
21173 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21174 goto out;
21175 r = -EFAULT;
21176- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21177- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21178+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21179 goto out;
21180+ for (i = 0; i < cpuid->nent; ++i) {
21181+ struct kvm_cpuid_entry2 cpuid_entry;
21182+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21183+ goto out;
21184+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21185+ }
21186 vcpu->arch.cpuid_nent = cpuid->nent;
21187 kvm_apic_set_version(vcpu);
21188 return 0;
21189@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21190 struct kvm_cpuid2 *cpuid,
21191 struct kvm_cpuid_entry2 __user *entries)
21192 {
21193- int r;
21194+ int r, i;
21195
21196 vcpu_load(vcpu);
21197 r = -E2BIG;
21198 if (cpuid->nent < vcpu->arch.cpuid_nent)
21199 goto out;
21200 r = -EFAULT;
21201- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21202- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21203+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21204 goto out;
21205+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21206+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21207+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21208+ goto out;
21209+ }
21210 return 0;
21211
21212 out:
21213@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21214 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21215 struct kvm_interrupt *irq)
21216 {
21217- if (irq->irq < 0 || irq->irq >= 256)
21218+ if (irq->irq >= 256)
21219 return -EINVAL;
21220 if (irqchip_in_kernel(vcpu->kvm))
21221 return -ENXIO;
21222@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21223 .notifier_call = kvmclock_cpufreq_notifier
21224 };
21225
21226-int kvm_arch_init(void *opaque)
21227+int kvm_arch_init(const void *opaque)
21228 {
21229 int r, cpu;
21230- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21231+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21232
21233 if (kvm_x86_ops) {
21234 printk(KERN_ERR "kvm: already loaded the other module\n");
21235diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21236index 7e59dc1..b88c98f 100644
21237--- a/arch/x86/lguest/boot.c
21238+++ b/arch/x86/lguest/boot.c
21239@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21240 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21241 * Launcher to reboot us.
21242 */
21243-static void lguest_restart(char *reason)
21244+static __noreturn void lguest_restart(char *reason)
21245 {
21246 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21247+ BUG();
21248 }
21249
21250 /*G:050
21251diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21252index 824fa0b..c619e96 100644
21253--- a/arch/x86/lib/atomic64_32.c
21254+++ b/arch/x86/lib/atomic64_32.c
21255@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21256 }
21257 EXPORT_SYMBOL(atomic64_cmpxchg);
21258
21259+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21260+{
21261+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21262+}
21263+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21264+
21265 /**
21266 * atomic64_xchg - xchg atomic64 variable
21267 * @ptr: pointer to type atomic64_t
21268@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21269 EXPORT_SYMBOL(atomic64_xchg);
21270
21271 /**
21272+ * atomic64_xchg_unchecked - xchg atomic64 variable
21273+ * @ptr: pointer to type atomic64_unchecked_t
21274+ * @new_val: value to assign
21275+ *
21276+ * Atomically xchgs the value of @ptr to @new_val and returns
21277+ * the old value.
21278+ */
21279+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21280+{
21281+ /*
21282+ * Try first with a (possibly incorrect) assumption about
21283+ * what we have there. We'll do two loops most likely,
21284+ * but we'll get an ownership MESI transaction straight away
21285+ * instead of a read transaction followed by a
21286+ * flush-for-ownership transaction:
21287+ */
21288+ u64 old_val, real_val = 0;
21289+
21290+ do {
21291+ old_val = real_val;
21292+
21293+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21294+
21295+ } while (real_val != old_val);
21296+
21297+ return old_val;
21298+}
21299+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21300+
21301+/**
21302 * atomic64_set - set atomic64 variable
21303 * @ptr: pointer to type atomic64_t
21304 * @new_val: value to assign
21305@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21306 EXPORT_SYMBOL(atomic64_set);
21307
21308 /**
21309-EXPORT_SYMBOL(atomic64_read);
21310+ * atomic64_unchecked_set - set atomic64 variable
21311+ * @ptr: pointer to type atomic64_unchecked_t
21312+ * @new_val: value to assign
21313+ *
21314+ * Atomically sets the value of @ptr to @new_val.
21315+ */
21316+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21317+{
21318+ atomic64_xchg_unchecked(ptr, new_val);
21319+}
21320+EXPORT_SYMBOL(atomic64_set_unchecked);
21321+
21322+/**
21323 * atomic64_add_return - add and return
21324 * @delta: integer value to add
21325 * @ptr: pointer to type atomic64_t
21326@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21327 }
21328 EXPORT_SYMBOL(atomic64_add_return);
21329
21330+/**
21331+ * atomic64_add_return_unchecked - add and return
21332+ * @delta: integer value to add
21333+ * @ptr: pointer to type atomic64_unchecked_t
21334+ *
21335+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21336+ */
21337+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21338+{
21339+ /*
21340+ * Try first with a (possibly incorrect) assumption about
21341+ * what we have there. We'll do two loops most likely,
21342+ * but we'll get an ownership MESI transaction straight away
21343+ * instead of a read transaction followed by a
21344+ * flush-for-ownership transaction:
21345+ */
21346+ u64 old_val, new_val, real_val = 0;
21347+
21348+ do {
21349+ old_val = real_val;
21350+ new_val = old_val + delta;
21351+
21352+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21353+
21354+ } while (real_val != old_val);
21355+
21356+ return new_val;
21357+}
21358+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21359+
21360 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21361 {
21362 return atomic64_add_return(-delta, ptr);
21363 }
21364 EXPORT_SYMBOL(atomic64_sub_return);
21365
21366+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21367+{
21368+ return atomic64_add_return_unchecked(-delta, ptr);
21369+}
21370+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21371+
21372 u64 atomic64_inc_return(atomic64_t *ptr)
21373 {
21374 return atomic64_add_return(1, ptr);
21375 }
21376 EXPORT_SYMBOL(atomic64_inc_return);
21377
21378+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21379+{
21380+ return atomic64_add_return_unchecked(1, ptr);
21381+}
21382+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21383+
21384 u64 atomic64_dec_return(atomic64_t *ptr)
21385 {
21386 return atomic64_sub_return(1, ptr);
21387 }
21388 EXPORT_SYMBOL(atomic64_dec_return);
21389
21390+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21391+{
21392+ return atomic64_sub_return_unchecked(1, ptr);
21393+}
21394+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21395+
21396 /**
21397 * atomic64_add - add integer to atomic64 variable
21398 * @delta: integer value to add
21399@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21400 EXPORT_SYMBOL(atomic64_add);
21401
21402 /**
21403+ * atomic64_add_unchecked - add integer to atomic64 variable
21404+ * @delta: integer value to add
21405+ * @ptr: pointer to type atomic64_unchecked_t
21406+ *
21407+ * Atomically adds @delta to @ptr.
21408+ */
21409+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21410+{
21411+ atomic64_add_return_unchecked(delta, ptr);
21412+}
21413+EXPORT_SYMBOL(atomic64_add_unchecked);
21414+
21415+/**
21416 * atomic64_sub - subtract the atomic64 variable
21417 * @delta: integer value to subtract
21418 * @ptr: pointer to type atomic64_t
21419@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21420 EXPORT_SYMBOL(atomic64_sub);
21421
21422 /**
21423+ * atomic64_sub_unchecked - subtract the atomic64 variable
21424+ * @delta: integer value to subtract
21425+ * @ptr: pointer to type atomic64_unchecked_t
21426+ *
21427+ * Atomically subtracts @delta from @ptr.
21428+ */
21429+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21430+{
21431+ atomic64_add_unchecked(-delta, ptr);
21432+}
21433+EXPORT_SYMBOL(atomic64_sub_unchecked);
21434+
21435+/**
21436 * atomic64_sub_and_test - subtract value from variable and test result
21437 * @delta: integer value to subtract
21438 * @ptr: pointer to type atomic64_t
21439@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21440 EXPORT_SYMBOL(atomic64_inc);
21441
21442 /**
21443+ * atomic64_inc_unchecked - increment atomic64 variable
21444+ * @ptr: pointer to type atomic64_unchecked_t
21445+ *
21446+ * Atomically increments @ptr by 1.
21447+ */
21448+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21449+{
21450+ atomic64_add_unchecked(1, ptr);
21451+}
21452+EXPORT_SYMBOL(atomic64_inc_unchecked);
21453+
21454+/**
21455 * atomic64_dec - decrement atomic64 variable
21456 * @ptr: pointer to type atomic64_t
21457 *
21458@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21459 EXPORT_SYMBOL(atomic64_dec);
21460
21461 /**
21462+ * atomic64_dec_unchecked - decrement atomic64 variable
21463+ * @ptr: pointer to type atomic64_unchecked_t
21464+ *
21465+ * Atomically decrements @ptr by 1.
21466+ */
21467+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21468+{
21469+ atomic64_sub_unchecked(1, ptr);
21470+}
21471+EXPORT_SYMBOL(atomic64_dec_unchecked);
21472+
21473+/**
21474 * atomic64_dec_and_test - decrement and test
21475 * @ptr: pointer to type atomic64_t
21476 *
21477diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21478index adbccd0..98f96c8 100644
21479--- a/arch/x86/lib/checksum_32.S
21480+++ b/arch/x86/lib/checksum_32.S
21481@@ -28,7 +28,8 @@
21482 #include <linux/linkage.h>
21483 #include <asm/dwarf2.h>
21484 #include <asm/errno.h>
21485-
21486+#include <asm/segment.h>
21487+
21488 /*
21489 * computes a partial checksum, e.g. for TCP/UDP fragments
21490 */
21491@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21492
21493 #define ARGBASE 16
21494 #define FP 12
21495-
21496-ENTRY(csum_partial_copy_generic)
21497+
21498+ENTRY(csum_partial_copy_generic_to_user)
21499 CFI_STARTPROC
21500+
21501+#ifdef CONFIG_PAX_MEMORY_UDEREF
21502+ pushl %gs
21503+ CFI_ADJUST_CFA_OFFSET 4
21504+ popl %es
21505+ CFI_ADJUST_CFA_OFFSET -4
21506+ jmp csum_partial_copy_generic
21507+#endif
21508+
21509+ENTRY(csum_partial_copy_generic_from_user)
21510+
21511+#ifdef CONFIG_PAX_MEMORY_UDEREF
21512+ pushl %gs
21513+ CFI_ADJUST_CFA_OFFSET 4
21514+ popl %ds
21515+ CFI_ADJUST_CFA_OFFSET -4
21516+#endif
21517+
21518+ENTRY(csum_partial_copy_generic)
21519 subl $4,%esp
21520 CFI_ADJUST_CFA_OFFSET 4
21521 pushl %edi
21522@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21523 jmp 4f
21524 SRC(1: movw (%esi), %bx )
21525 addl $2, %esi
21526-DST( movw %bx, (%edi) )
21527+DST( movw %bx, %es:(%edi) )
21528 addl $2, %edi
21529 addw %bx, %ax
21530 adcl $0, %eax
21531@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21532 SRC(1: movl (%esi), %ebx )
21533 SRC( movl 4(%esi), %edx )
21534 adcl %ebx, %eax
21535-DST( movl %ebx, (%edi) )
21536+DST( movl %ebx, %es:(%edi) )
21537 adcl %edx, %eax
21538-DST( movl %edx, 4(%edi) )
21539+DST( movl %edx, %es:4(%edi) )
21540
21541 SRC( movl 8(%esi), %ebx )
21542 SRC( movl 12(%esi), %edx )
21543 adcl %ebx, %eax
21544-DST( movl %ebx, 8(%edi) )
21545+DST( movl %ebx, %es:8(%edi) )
21546 adcl %edx, %eax
21547-DST( movl %edx, 12(%edi) )
21548+DST( movl %edx, %es:12(%edi) )
21549
21550 SRC( movl 16(%esi), %ebx )
21551 SRC( movl 20(%esi), %edx )
21552 adcl %ebx, %eax
21553-DST( movl %ebx, 16(%edi) )
21554+DST( movl %ebx, %es:16(%edi) )
21555 adcl %edx, %eax
21556-DST( movl %edx, 20(%edi) )
21557+DST( movl %edx, %es:20(%edi) )
21558
21559 SRC( movl 24(%esi), %ebx )
21560 SRC( movl 28(%esi), %edx )
21561 adcl %ebx, %eax
21562-DST( movl %ebx, 24(%edi) )
21563+DST( movl %ebx, %es:24(%edi) )
21564 adcl %edx, %eax
21565-DST( movl %edx, 28(%edi) )
21566+DST( movl %edx, %es:28(%edi) )
21567
21568 lea 32(%esi), %esi
21569 lea 32(%edi), %edi
21570@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21571 shrl $2, %edx # This clears CF
21572 SRC(3: movl (%esi), %ebx )
21573 adcl %ebx, %eax
21574-DST( movl %ebx, (%edi) )
21575+DST( movl %ebx, %es:(%edi) )
21576 lea 4(%esi), %esi
21577 lea 4(%edi), %edi
21578 dec %edx
21579@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21580 jb 5f
21581 SRC( movw (%esi), %cx )
21582 leal 2(%esi), %esi
21583-DST( movw %cx, (%edi) )
21584+DST( movw %cx, %es:(%edi) )
21585 leal 2(%edi), %edi
21586 je 6f
21587 shll $16,%ecx
21588 SRC(5: movb (%esi), %cl )
21589-DST( movb %cl, (%edi) )
21590+DST( movb %cl, %es:(%edi) )
21591 6: addl %ecx, %eax
21592 adcl $0, %eax
21593 7:
21594@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21595
21596 6001:
21597 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21598- movl $-EFAULT, (%ebx)
21599+ movl $-EFAULT, %ss:(%ebx)
21600
21601 # zero the complete destination - computing the rest
21602 # is too much work
21603@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21604
21605 6002:
21606 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21607- movl $-EFAULT,(%ebx)
21608+ movl $-EFAULT,%ss:(%ebx)
21609 jmp 5000b
21610
21611 .previous
21612
21613+ pushl %ss
21614+ CFI_ADJUST_CFA_OFFSET 4
21615+ popl %ds
21616+ CFI_ADJUST_CFA_OFFSET -4
21617+ pushl %ss
21618+ CFI_ADJUST_CFA_OFFSET 4
21619+ popl %es
21620+ CFI_ADJUST_CFA_OFFSET -4
21621 popl %ebx
21622 CFI_ADJUST_CFA_OFFSET -4
21623 CFI_RESTORE ebx
21624@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21625 CFI_ADJUST_CFA_OFFSET -4
21626 ret
21627 CFI_ENDPROC
21628-ENDPROC(csum_partial_copy_generic)
21629+ENDPROC(csum_partial_copy_generic_to_user)
21630
21631 #else
21632
21633 /* Version for PentiumII/PPro */
21634
21635 #define ROUND1(x) \
21636+ nop; nop; nop; \
21637 SRC(movl x(%esi), %ebx ) ; \
21638 addl %ebx, %eax ; \
21639- DST(movl %ebx, x(%edi) ) ;
21640+ DST(movl %ebx, %es:x(%edi)) ;
21641
21642 #define ROUND(x) \
21643+ nop; nop; nop; \
21644 SRC(movl x(%esi), %ebx ) ; \
21645 adcl %ebx, %eax ; \
21646- DST(movl %ebx, x(%edi) ) ;
21647+ DST(movl %ebx, %es:x(%edi)) ;
21648
21649 #define ARGBASE 12
21650-
21651-ENTRY(csum_partial_copy_generic)
21652+
21653+ENTRY(csum_partial_copy_generic_to_user)
21654 CFI_STARTPROC
21655+
21656+#ifdef CONFIG_PAX_MEMORY_UDEREF
21657+ pushl %gs
21658+ CFI_ADJUST_CFA_OFFSET 4
21659+ popl %es
21660+ CFI_ADJUST_CFA_OFFSET -4
21661+ jmp csum_partial_copy_generic
21662+#endif
21663+
21664+ENTRY(csum_partial_copy_generic_from_user)
21665+
21666+#ifdef CONFIG_PAX_MEMORY_UDEREF
21667+ pushl %gs
21668+ CFI_ADJUST_CFA_OFFSET 4
21669+ popl %ds
21670+ CFI_ADJUST_CFA_OFFSET -4
21671+#endif
21672+
21673+ENTRY(csum_partial_copy_generic)
21674 pushl %ebx
21675 CFI_ADJUST_CFA_OFFSET 4
21676 CFI_REL_OFFSET ebx, 0
21677@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21678 subl %ebx, %edi
21679 lea -1(%esi),%edx
21680 andl $-32,%edx
21681- lea 3f(%ebx,%ebx), %ebx
21682+ lea 3f(%ebx,%ebx,2), %ebx
21683 testl %esi, %esi
21684 jmp *%ebx
21685 1: addl $64,%esi
21686@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21687 jb 5f
21688 SRC( movw (%esi), %dx )
21689 leal 2(%esi), %esi
21690-DST( movw %dx, (%edi) )
21691+DST( movw %dx, %es:(%edi) )
21692 leal 2(%edi), %edi
21693 je 6f
21694 shll $16,%edx
21695 5:
21696 SRC( movb (%esi), %dl )
21697-DST( movb %dl, (%edi) )
21698+DST( movb %dl, %es:(%edi) )
21699 6: addl %edx, %eax
21700 adcl $0, %eax
21701 7:
21702 .section .fixup, "ax"
21703 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21704- movl $-EFAULT, (%ebx)
21705+ movl $-EFAULT, %ss:(%ebx)
21706 # zero the complete destination (computing the rest is too much work)
21707 movl ARGBASE+8(%esp),%edi # dst
21708 movl ARGBASE+12(%esp),%ecx # len
21709@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21710 rep; stosb
21711 jmp 7b
21712 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21713- movl $-EFAULT, (%ebx)
21714+ movl $-EFAULT, %ss:(%ebx)
21715 jmp 7b
21716 .previous
21717
21718+#ifdef CONFIG_PAX_MEMORY_UDEREF
21719+ pushl %ss
21720+ CFI_ADJUST_CFA_OFFSET 4
21721+ popl %ds
21722+ CFI_ADJUST_CFA_OFFSET -4
21723+ pushl %ss
21724+ CFI_ADJUST_CFA_OFFSET 4
21725+ popl %es
21726+ CFI_ADJUST_CFA_OFFSET -4
21727+#endif
21728+
21729 popl %esi
21730 CFI_ADJUST_CFA_OFFSET -4
21731 CFI_RESTORE esi
21732@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21733 CFI_RESTORE ebx
21734 ret
21735 CFI_ENDPROC
21736-ENDPROC(csum_partial_copy_generic)
21737+ENDPROC(csum_partial_copy_generic_to_user)
21738
21739 #undef ROUND
21740 #undef ROUND1
21741diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21742index ebeafcc..1e3a402 100644
21743--- a/arch/x86/lib/clear_page_64.S
21744+++ b/arch/x86/lib/clear_page_64.S
21745@@ -1,5 +1,6 @@
21746 #include <linux/linkage.h>
21747 #include <asm/dwarf2.h>
21748+#include <asm/alternative-asm.h>
21749
21750 /*
21751 * Zero a page.
21752@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21753 movl $4096/8,%ecx
21754 xorl %eax,%eax
21755 rep stosq
21756+ pax_force_retaddr
21757 ret
21758 CFI_ENDPROC
21759 ENDPROC(clear_page_c)
21760@@ -33,6 +35,7 @@ ENTRY(clear_page)
21761 leaq 64(%rdi),%rdi
21762 jnz .Lloop
21763 nop
21764+ pax_force_retaddr
21765 ret
21766 CFI_ENDPROC
21767 .Lclear_page_end:
21768@@ -43,7 +46,7 @@ ENDPROC(clear_page)
21769
21770 #include <asm/cpufeature.h>
21771
21772- .section .altinstr_replacement,"ax"
21773+ .section .altinstr_replacement,"a"
21774 1: .byte 0xeb /* jmp <disp8> */
21775 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21776 2:
21777diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21778index 727a5d4..333818a 100644
21779--- a/arch/x86/lib/copy_page_64.S
21780+++ b/arch/x86/lib/copy_page_64.S
21781@@ -2,12 +2,14 @@
21782
21783 #include <linux/linkage.h>
21784 #include <asm/dwarf2.h>
21785+#include <asm/alternative-asm.h>
21786
21787 ALIGN
21788 copy_page_c:
21789 CFI_STARTPROC
21790 movl $4096/8,%ecx
21791 rep movsq
21792+ pax_force_retaddr
21793 ret
21794 CFI_ENDPROC
21795 ENDPROC(copy_page_c)
21796@@ -38,7 +40,7 @@ ENTRY(copy_page)
21797 movq 16 (%rsi), %rdx
21798 movq 24 (%rsi), %r8
21799 movq 32 (%rsi), %r9
21800- movq 40 (%rsi), %r10
21801+ movq 40 (%rsi), %r13
21802 movq 48 (%rsi), %r11
21803 movq 56 (%rsi), %r12
21804
21805@@ -49,7 +51,7 @@ ENTRY(copy_page)
21806 movq %rdx, 16 (%rdi)
21807 movq %r8, 24 (%rdi)
21808 movq %r9, 32 (%rdi)
21809- movq %r10, 40 (%rdi)
21810+ movq %r13, 40 (%rdi)
21811 movq %r11, 48 (%rdi)
21812 movq %r12, 56 (%rdi)
21813
21814@@ -68,7 +70,7 @@ ENTRY(copy_page)
21815 movq 16 (%rsi), %rdx
21816 movq 24 (%rsi), %r8
21817 movq 32 (%rsi), %r9
21818- movq 40 (%rsi), %r10
21819+ movq 40 (%rsi), %r13
21820 movq 48 (%rsi), %r11
21821 movq 56 (%rsi), %r12
21822
21823@@ -77,7 +79,7 @@ ENTRY(copy_page)
21824 movq %rdx, 16 (%rdi)
21825 movq %r8, 24 (%rdi)
21826 movq %r9, 32 (%rdi)
21827- movq %r10, 40 (%rdi)
21828+ movq %r13, 40 (%rdi)
21829 movq %r11, 48 (%rdi)
21830 movq %r12, 56 (%rdi)
21831
21832@@ -94,6 +96,7 @@ ENTRY(copy_page)
21833 CFI_RESTORE r13
21834 addq $3*8,%rsp
21835 CFI_ADJUST_CFA_OFFSET -3*8
21836+ pax_force_retaddr
21837 ret
21838 .Lcopy_page_end:
21839 CFI_ENDPROC
21840@@ -104,7 +107,7 @@ ENDPROC(copy_page)
21841
21842 #include <asm/cpufeature.h>
21843
21844- .section .altinstr_replacement,"ax"
21845+ .section .altinstr_replacement,"a"
21846 1: .byte 0xeb /* jmp <disp8> */
21847 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21848 2:
21849diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21850index af8debd..40c75f3 100644
21851--- a/arch/x86/lib/copy_user_64.S
21852+++ b/arch/x86/lib/copy_user_64.S
21853@@ -15,13 +15,15 @@
21854 #include <asm/asm-offsets.h>
21855 #include <asm/thread_info.h>
21856 #include <asm/cpufeature.h>
21857+#include <asm/pgtable.h>
21858+#include <asm/alternative-asm.h>
21859
21860 .macro ALTERNATIVE_JUMP feature,orig,alt
21861 0:
21862 .byte 0xe9 /* 32bit jump */
21863 .long \orig-1f /* by default jump to orig */
21864 1:
21865- .section .altinstr_replacement,"ax"
21866+ .section .altinstr_replacement,"a"
21867 2: .byte 0xe9 /* near jump with 32bit immediate */
21868 .long \alt-1b /* offset */ /* or alternatively to alt */
21869 .previous
21870@@ -64,55 +66,26 @@
21871 #endif
21872 .endm
21873
21874-/* Standard copy_to_user with segment limit checking */
21875-ENTRY(copy_to_user)
21876- CFI_STARTPROC
21877- GET_THREAD_INFO(%rax)
21878- movq %rdi,%rcx
21879- addq %rdx,%rcx
21880- jc bad_to_user
21881- cmpq TI_addr_limit(%rax),%rcx
21882- ja bad_to_user
21883- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21884- CFI_ENDPROC
21885-ENDPROC(copy_to_user)
21886-
21887-/* Standard copy_from_user with segment limit checking */
21888-ENTRY(copy_from_user)
21889- CFI_STARTPROC
21890- GET_THREAD_INFO(%rax)
21891- movq %rsi,%rcx
21892- addq %rdx,%rcx
21893- jc bad_from_user
21894- cmpq TI_addr_limit(%rax),%rcx
21895- ja bad_from_user
21896- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21897- CFI_ENDPROC
21898-ENDPROC(copy_from_user)
21899-
21900 ENTRY(copy_user_generic)
21901 CFI_STARTPROC
21902 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21903 CFI_ENDPROC
21904 ENDPROC(copy_user_generic)
21905
21906-ENTRY(__copy_from_user_inatomic)
21907- CFI_STARTPROC
21908- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21909- CFI_ENDPROC
21910-ENDPROC(__copy_from_user_inatomic)
21911-
21912 .section .fixup,"ax"
21913 /* must zero dest */
21914 ENTRY(bad_from_user)
21915 bad_from_user:
21916 CFI_STARTPROC
21917+ testl %edx,%edx
21918+ js bad_to_user
21919 movl %edx,%ecx
21920 xorl %eax,%eax
21921 rep
21922 stosb
21923 bad_to_user:
21924 movl %edx,%eax
21925+ pax_force_retaddr
21926 ret
21927 CFI_ENDPROC
21928 ENDPROC(bad_from_user)
21929@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21930 jz 17f
21931 1: movq (%rsi),%r8
21932 2: movq 1*8(%rsi),%r9
21933-3: movq 2*8(%rsi),%r10
21934+3: movq 2*8(%rsi),%rax
21935 4: movq 3*8(%rsi),%r11
21936 5: movq %r8,(%rdi)
21937 6: movq %r9,1*8(%rdi)
21938-7: movq %r10,2*8(%rdi)
21939+7: movq %rax,2*8(%rdi)
21940 8: movq %r11,3*8(%rdi)
21941 9: movq 4*8(%rsi),%r8
21942 10: movq 5*8(%rsi),%r9
21943-11: movq 6*8(%rsi),%r10
21944+11: movq 6*8(%rsi),%rax
21945 12: movq 7*8(%rsi),%r11
21946 13: movq %r8,4*8(%rdi)
21947 14: movq %r9,5*8(%rdi)
21948-15: movq %r10,6*8(%rdi)
21949+15: movq %rax,6*8(%rdi)
21950 16: movq %r11,7*8(%rdi)
21951 leaq 64(%rsi),%rsi
21952 leaq 64(%rdi),%rdi
21953@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21954 decl %ecx
21955 jnz 21b
21956 23: xor %eax,%eax
21957+ pax_force_retaddr
21958 ret
21959
21960 .section .fixup,"ax"
21961@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
21962 3: rep
21963 movsb
21964 4: xorl %eax,%eax
21965+ pax_force_retaddr
21966 ret
21967
21968 .section .fixup,"ax"
21969diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21970index cb0c112..e3a6895 100644
21971--- a/arch/x86/lib/copy_user_nocache_64.S
21972+++ b/arch/x86/lib/copy_user_nocache_64.S
21973@@ -8,12 +8,14 @@
21974
21975 #include <linux/linkage.h>
21976 #include <asm/dwarf2.h>
21977+#include <asm/alternative-asm.h>
21978
21979 #define FIX_ALIGNMENT 1
21980
21981 #include <asm/current.h>
21982 #include <asm/asm-offsets.h>
21983 #include <asm/thread_info.h>
21984+#include <asm/pgtable.h>
21985
21986 .macro ALIGN_DESTINATION
21987 #ifdef FIX_ALIGNMENT
21988@@ -50,6 +52,15 @@
21989 */
21990 ENTRY(__copy_user_nocache)
21991 CFI_STARTPROC
21992+
21993+#ifdef CONFIG_PAX_MEMORY_UDEREF
21994+ mov $PAX_USER_SHADOW_BASE,%rcx
21995+ cmp %rcx,%rsi
21996+ jae 1f
21997+ add %rcx,%rsi
21998+1:
21999+#endif
22000+
22001 cmpl $8,%edx
22002 jb 20f /* less then 8 bytes, go to byte copy loop */
22003 ALIGN_DESTINATION
22004@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22005 jz 17f
22006 1: movq (%rsi),%r8
22007 2: movq 1*8(%rsi),%r9
22008-3: movq 2*8(%rsi),%r10
22009+3: movq 2*8(%rsi),%rax
22010 4: movq 3*8(%rsi),%r11
22011 5: movnti %r8,(%rdi)
22012 6: movnti %r9,1*8(%rdi)
22013-7: movnti %r10,2*8(%rdi)
22014+7: movnti %rax,2*8(%rdi)
22015 8: movnti %r11,3*8(%rdi)
22016 9: movq 4*8(%rsi),%r8
22017 10: movq 5*8(%rsi),%r9
22018-11: movq 6*8(%rsi),%r10
22019+11: movq 6*8(%rsi),%rax
22020 12: movq 7*8(%rsi),%r11
22021 13: movnti %r8,4*8(%rdi)
22022 14: movnti %r9,5*8(%rdi)
22023-15: movnti %r10,6*8(%rdi)
22024+15: movnti %rax,6*8(%rdi)
22025 16: movnti %r11,7*8(%rdi)
22026 leaq 64(%rsi),%rsi
22027 leaq 64(%rdi),%rdi
22028@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22029 jnz 21b
22030 23: xorl %eax,%eax
22031 sfence
22032+ pax_force_retaddr
22033 ret
22034
22035 .section .fixup,"ax"
22036diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22037index f0dba36..48cb4d6 100644
22038--- a/arch/x86/lib/csum-copy_64.S
22039+++ b/arch/x86/lib/csum-copy_64.S
22040@@ -8,6 +8,7 @@
22041 #include <linux/linkage.h>
22042 #include <asm/dwarf2.h>
22043 #include <asm/errno.h>
22044+#include <asm/alternative-asm.h>
22045
22046 /*
22047 * Checksum copy with exception handling.
22048@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22049 CFI_RESTORE rbp
22050 addq $7*8,%rsp
22051 CFI_ADJUST_CFA_OFFSET -7*8
22052+ pax_force_retaddr 0, 1
22053 ret
22054 CFI_RESTORE_STATE
22055
22056diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22057index 459b58a..9570bc7 100644
22058--- a/arch/x86/lib/csum-wrappers_64.c
22059+++ b/arch/x86/lib/csum-wrappers_64.c
22060@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22061 len -= 2;
22062 }
22063 }
22064- isum = csum_partial_copy_generic((__force const void *)src,
22065+
22066+#ifdef CONFIG_PAX_MEMORY_UDEREF
22067+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22068+ src += PAX_USER_SHADOW_BASE;
22069+#endif
22070+
22071+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22072 dst, len, isum, errp, NULL);
22073 if (unlikely(*errp))
22074 goto out_err;
22075@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22076 }
22077
22078 *errp = 0;
22079- return csum_partial_copy_generic(src, (void __force *)dst,
22080+
22081+#ifdef CONFIG_PAX_MEMORY_UDEREF
22082+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22083+ dst += PAX_USER_SHADOW_BASE;
22084+#endif
22085+
22086+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22087 len, isum, NULL, errp);
22088 }
22089 EXPORT_SYMBOL(csum_partial_copy_to_user);
22090diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22091index 51f1504..ddac4c1 100644
22092--- a/arch/x86/lib/getuser.S
22093+++ b/arch/x86/lib/getuser.S
22094@@ -33,15 +33,38 @@
22095 #include <asm/asm-offsets.h>
22096 #include <asm/thread_info.h>
22097 #include <asm/asm.h>
22098+#include <asm/segment.h>
22099+#include <asm/pgtable.h>
22100+#include <asm/alternative-asm.h>
22101+
22102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22103+#define __copyuser_seg gs;
22104+#else
22105+#define __copyuser_seg
22106+#endif
22107
22108 .text
22109 ENTRY(__get_user_1)
22110 CFI_STARTPROC
22111+
22112+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22113 GET_THREAD_INFO(%_ASM_DX)
22114 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22115 jae bad_get_user
22116-1: movzb (%_ASM_AX),%edx
22117+
22118+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22119+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22120+ cmp %_ASM_DX,%_ASM_AX
22121+ jae 1234f
22122+ add %_ASM_DX,%_ASM_AX
22123+1234:
22124+#endif
22125+
22126+#endif
22127+
22128+1: __copyuser_seg movzb (%_ASM_AX),%edx
22129 xor %eax,%eax
22130+ pax_force_retaddr
22131 ret
22132 CFI_ENDPROC
22133 ENDPROC(__get_user_1)
22134@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22135 ENTRY(__get_user_2)
22136 CFI_STARTPROC
22137 add $1,%_ASM_AX
22138+
22139+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22140 jc bad_get_user
22141 GET_THREAD_INFO(%_ASM_DX)
22142 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22143 jae bad_get_user
22144-2: movzwl -1(%_ASM_AX),%edx
22145+
22146+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22147+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22148+ cmp %_ASM_DX,%_ASM_AX
22149+ jae 1234f
22150+ add %_ASM_DX,%_ASM_AX
22151+1234:
22152+#endif
22153+
22154+#endif
22155+
22156+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22157 xor %eax,%eax
22158+ pax_force_retaddr
22159 ret
22160 CFI_ENDPROC
22161 ENDPROC(__get_user_2)
22162@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22163 ENTRY(__get_user_4)
22164 CFI_STARTPROC
22165 add $3,%_ASM_AX
22166+
22167+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22168 jc bad_get_user
22169 GET_THREAD_INFO(%_ASM_DX)
22170 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22171 jae bad_get_user
22172-3: mov -3(%_ASM_AX),%edx
22173+
22174+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22175+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22176+ cmp %_ASM_DX,%_ASM_AX
22177+ jae 1234f
22178+ add %_ASM_DX,%_ASM_AX
22179+1234:
22180+#endif
22181+
22182+#endif
22183+
22184+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22185 xor %eax,%eax
22186+ pax_force_retaddr
22187 ret
22188 CFI_ENDPROC
22189 ENDPROC(__get_user_4)
22190@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22191 GET_THREAD_INFO(%_ASM_DX)
22192 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22193 jae bad_get_user
22194+
22195+#ifdef CONFIG_PAX_MEMORY_UDEREF
22196+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22197+ cmp %_ASM_DX,%_ASM_AX
22198+ jae 1234f
22199+ add %_ASM_DX,%_ASM_AX
22200+1234:
22201+#endif
22202+
22203 4: movq -7(%_ASM_AX),%_ASM_DX
22204 xor %eax,%eax
22205+ pax_force_retaddr
22206 ret
22207 CFI_ENDPROC
22208 ENDPROC(__get_user_8)
22209@@ -91,6 +152,7 @@ bad_get_user:
22210 CFI_STARTPROC
22211 xor %edx,%edx
22212 mov $(-EFAULT),%_ASM_AX
22213+ pax_force_retaddr
22214 ret
22215 CFI_ENDPROC
22216 END(bad_get_user)
22217diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22218index 05a95e7..326f2fa 100644
22219--- a/arch/x86/lib/iomap_copy_64.S
22220+++ b/arch/x86/lib/iomap_copy_64.S
22221@@ -17,6 +17,7 @@
22222
22223 #include <linux/linkage.h>
22224 #include <asm/dwarf2.h>
22225+#include <asm/alternative-asm.h>
22226
22227 /*
22228 * override generic version in lib/iomap_copy.c
22229@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22230 CFI_STARTPROC
22231 movl %edx,%ecx
22232 rep movsd
22233+ pax_force_retaddr
22234 ret
22235 CFI_ENDPROC
22236 ENDPROC(__iowrite32_copy)
22237diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22238index ad5441e..610e351 100644
22239--- a/arch/x86/lib/memcpy_64.S
22240+++ b/arch/x86/lib/memcpy_64.S
22241@@ -4,6 +4,7 @@
22242
22243 #include <asm/cpufeature.h>
22244 #include <asm/dwarf2.h>
22245+#include <asm/alternative-asm.h>
22246
22247 /*
22248 * memcpy - Copy a memory block.
22249@@ -34,6 +35,7 @@ memcpy_c:
22250 rep movsq
22251 movl %edx, %ecx
22252 rep movsb
22253+ pax_force_retaddr
22254 ret
22255 CFI_ENDPROC
22256 ENDPROC(memcpy_c)
22257@@ -118,6 +120,7 @@ ENTRY(memcpy)
22258 jnz .Lloop_1
22259
22260 .Lend:
22261+ pax_force_retaddr 0, 1
22262 ret
22263 CFI_ENDPROC
22264 ENDPROC(memcpy)
22265@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22266 * It is also a lot simpler. Use this when possible:
22267 */
22268
22269- .section .altinstr_replacement, "ax"
22270+ .section .altinstr_replacement, "a"
22271 1: .byte 0xeb /* jmp <disp8> */
22272 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22273 2:
22274diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22275index 2c59481..7e9ba4e 100644
22276--- a/arch/x86/lib/memset_64.S
22277+++ b/arch/x86/lib/memset_64.S
22278@@ -2,6 +2,7 @@
22279
22280 #include <linux/linkage.h>
22281 #include <asm/dwarf2.h>
22282+#include <asm/alternative-asm.h>
22283
22284 /*
22285 * ISO C memset - set a memory block to a byte value.
22286@@ -28,6 +29,7 @@ memset_c:
22287 movl %r8d,%ecx
22288 rep stosb
22289 movq %r9,%rax
22290+ pax_force_retaddr
22291 ret
22292 CFI_ENDPROC
22293 ENDPROC(memset_c)
22294@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22295 ENTRY(memset)
22296 ENTRY(__memset)
22297 CFI_STARTPROC
22298- movq %rdi,%r10
22299 movq %rdx,%r11
22300
22301 /* expand byte value */
22302 movzbl %sil,%ecx
22303 movabs $0x0101010101010101,%rax
22304 mul %rcx /* with rax, clobbers rdx */
22305+ movq %rdi,%rdx
22306
22307 /* align dst */
22308 movl %edi,%r9d
22309@@ -95,7 +97,8 @@ ENTRY(__memset)
22310 jnz .Lloop_1
22311
22312 .Lende:
22313- movq %r10,%rax
22314+ movq %rdx,%rax
22315+ pax_force_retaddr
22316 ret
22317
22318 CFI_RESTORE_STATE
22319@@ -118,7 +121,7 @@ ENDPROC(__memset)
22320
22321 #include <asm/cpufeature.h>
22322
22323- .section .altinstr_replacement,"ax"
22324+ .section .altinstr_replacement,"a"
22325 1: .byte 0xeb /* jmp <disp8> */
22326 .byte (memset_c - memset) - (2f - 1b) /* offset */
22327 2:
22328diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22329index c9f2d9b..e7fd2c0 100644
22330--- a/arch/x86/lib/mmx_32.c
22331+++ b/arch/x86/lib/mmx_32.c
22332@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22333 {
22334 void *p;
22335 int i;
22336+ unsigned long cr0;
22337
22338 if (unlikely(in_interrupt()))
22339 return __memcpy(to, from, len);
22340@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22341 kernel_fpu_begin();
22342
22343 __asm__ __volatile__ (
22344- "1: prefetch (%0)\n" /* This set is 28 bytes */
22345- " prefetch 64(%0)\n"
22346- " prefetch 128(%0)\n"
22347- " prefetch 192(%0)\n"
22348- " prefetch 256(%0)\n"
22349+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22350+ " prefetch 64(%1)\n"
22351+ " prefetch 128(%1)\n"
22352+ " prefetch 192(%1)\n"
22353+ " prefetch 256(%1)\n"
22354 "2: \n"
22355 ".section .fixup, \"ax\"\n"
22356- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22357+ "3: \n"
22358+
22359+#ifdef CONFIG_PAX_KERNEXEC
22360+ " movl %%cr0, %0\n"
22361+ " movl %0, %%eax\n"
22362+ " andl $0xFFFEFFFF, %%eax\n"
22363+ " movl %%eax, %%cr0\n"
22364+#endif
22365+
22366+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22367+
22368+#ifdef CONFIG_PAX_KERNEXEC
22369+ " movl %0, %%cr0\n"
22370+#endif
22371+
22372 " jmp 2b\n"
22373 ".previous\n"
22374 _ASM_EXTABLE(1b, 3b)
22375- : : "r" (from));
22376+ : "=&r" (cr0) : "r" (from) : "ax");
22377
22378 for ( ; i > 5; i--) {
22379 __asm__ __volatile__ (
22380- "1: prefetch 320(%0)\n"
22381- "2: movq (%0), %%mm0\n"
22382- " movq 8(%0), %%mm1\n"
22383- " movq 16(%0), %%mm2\n"
22384- " movq 24(%0), %%mm3\n"
22385- " movq %%mm0, (%1)\n"
22386- " movq %%mm1, 8(%1)\n"
22387- " movq %%mm2, 16(%1)\n"
22388- " movq %%mm3, 24(%1)\n"
22389- " movq 32(%0), %%mm0\n"
22390- " movq 40(%0), %%mm1\n"
22391- " movq 48(%0), %%mm2\n"
22392- " movq 56(%0), %%mm3\n"
22393- " movq %%mm0, 32(%1)\n"
22394- " movq %%mm1, 40(%1)\n"
22395- " movq %%mm2, 48(%1)\n"
22396- " movq %%mm3, 56(%1)\n"
22397+ "1: prefetch 320(%1)\n"
22398+ "2: movq (%1), %%mm0\n"
22399+ " movq 8(%1), %%mm1\n"
22400+ " movq 16(%1), %%mm2\n"
22401+ " movq 24(%1), %%mm3\n"
22402+ " movq %%mm0, (%2)\n"
22403+ " movq %%mm1, 8(%2)\n"
22404+ " movq %%mm2, 16(%2)\n"
22405+ " movq %%mm3, 24(%2)\n"
22406+ " movq 32(%1), %%mm0\n"
22407+ " movq 40(%1), %%mm1\n"
22408+ " movq 48(%1), %%mm2\n"
22409+ " movq 56(%1), %%mm3\n"
22410+ " movq %%mm0, 32(%2)\n"
22411+ " movq %%mm1, 40(%2)\n"
22412+ " movq %%mm2, 48(%2)\n"
22413+ " movq %%mm3, 56(%2)\n"
22414 ".section .fixup, \"ax\"\n"
22415- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22416+ "3:\n"
22417+
22418+#ifdef CONFIG_PAX_KERNEXEC
22419+ " movl %%cr0, %0\n"
22420+ " movl %0, %%eax\n"
22421+ " andl $0xFFFEFFFF, %%eax\n"
22422+ " movl %%eax, %%cr0\n"
22423+#endif
22424+
22425+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22426+
22427+#ifdef CONFIG_PAX_KERNEXEC
22428+ " movl %0, %%cr0\n"
22429+#endif
22430+
22431 " jmp 2b\n"
22432 ".previous\n"
22433 _ASM_EXTABLE(1b, 3b)
22434- : : "r" (from), "r" (to) : "memory");
22435+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22436
22437 from += 64;
22438 to += 64;
22439@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22440 static void fast_copy_page(void *to, void *from)
22441 {
22442 int i;
22443+ unsigned long cr0;
22444
22445 kernel_fpu_begin();
22446
22447@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22448 * but that is for later. -AV
22449 */
22450 __asm__ __volatile__(
22451- "1: prefetch (%0)\n"
22452- " prefetch 64(%0)\n"
22453- " prefetch 128(%0)\n"
22454- " prefetch 192(%0)\n"
22455- " prefetch 256(%0)\n"
22456+ "1: prefetch (%1)\n"
22457+ " prefetch 64(%1)\n"
22458+ " prefetch 128(%1)\n"
22459+ " prefetch 192(%1)\n"
22460+ " prefetch 256(%1)\n"
22461 "2: \n"
22462 ".section .fixup, \"ax\"\n"
22463- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22464+ "3: \n"
22465+
22466+#ifdef CONFIG_PAX_KERNEXEC
22467+ " movl %%cr0, %0\n"
22468+ " movl %0, %%eax\n"
22469+ " andl $0xFFFEFFFF, %%eax\n"
22470+ " movl %%eax, %%cr0\n"
22471+#endif
22472+
22473+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22474+
22475+#ifdef CONFIG_PAX_KERNEXEC
22476+ " movl %0, %%cr0\n"
22477+#endif
22478+
22479 " jmp 2b\n"
22480 ".previous\n"
22481- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22482+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22483
22484 for (i = 0; i < (4096-320)/64; i++) {
22485 __asm__ __volatile__ (
22486- "1: prefetch 320(%0)\n"
22487- "2: movq (%0), %%mm0\n"
22488- " movntq %%mm0, (%1)\n"
22489- " movq 8(%0), %%mm1\n"
22490- " movntq %%mm1, 8(%1)\n"
22491- " movq 16(%0), %%mm2\n"
22492- " movntq %%mm2, 16(%1)\n"
22493- " movq 24(%0), %%mm3\n"
22494- " movntq %%mm3, 24(%1)\n"
22495- " movq 32(%0), %%mm4\n"
22496- " movntq %%mm4, 32(%1)\n"
22497- " movq 40(%0), %%mm5\n"
22498- " movntq %%mm5, 40(%1)\n"
22499- " movq 48(%0), %%mm6\n"
22500- " movntq %%mm6, 48(%1)\n"
22501- " movq 56(%0), %%mm7\n"
22502- " movntq %%mm7, 56(%1)\n"
22503+ "1: prefetch 320(%1)\n"
22504+ "2: movq (%1), %%mm0\n"
22505+ " movntq %%mm0, (%2)\n"
22506+ " movq 8(%1), %%mm1\n"
22507+ " movntq %%mm1, 8(%2)\n"
22508+ " movq 16(%1), %%mm2\n"
22509+ " movntq %%mm2, 16(%2)\n"
22510+ " movq 24(%1), %%mm3\n"
22511+ " movntq %%mm3, 24(%2)\n"
22512+ " movq 32(%1), %%mm4\n"
22513+ " movntq %%mm4, 32(%2)\n"
22514+ " movq 40(%1), %%mm5\n"
22515+ " movntq %%mm5, 40(%2)\n"
22516+ " movq 48(%1), %%mm6\n"
22517+ " movntq %%mm6, 48(%2)\n"
22518+ " movq 56(%1), %%mm7\n"
22519+ " movntq %%mm7, 56(%2)\n"
22520 ".section .fixup, \"ax\"\n"
22521- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22522+ "3:\n"
22523+
22524+#ifdef CONFIG_PAX_KERNEXEC
22525+ " movl %%cr0, %0\n"
22526+ " movl %0, %%eax\n"
22527+ " andl $0xFFFEFFFF, %%eax\n"
22528+ " movl %%eax, %%cr0\n"
22529+#endif
22530+
22531+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22532+
22533+#ifdef CONFIG_PAX_KERNEXEC
22534+ " movl %0, %%cr0\n"
22535+#endif
22536+
22537 " jmp 2b\n"
22538 ".previous\n"
22539- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22540+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22541
22542 from += 64;
22543 to += 64;
22544@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22545 static void fast_copy_page(void *to, void *from)
22546 {
22547 int i;
22548+ unsigned long cr0;
22549
22550 kernel_fpu_begin();
22551
22552 __asm__ __volatile__ (
22553- "1: prefetch (%0)\n"
22554- " prefetch 64(%0)\n"
22555- " prefetch 128(%0)\n"
22556- " prefetch 192(%0)\n"
22557- " prefetch 256(%0)\n"
22558+ "1: prefetch (%1)\n"
22559+ " prefetch 64(%1)\n"
22560+ " prefetch 128(%1)\n"
22561+ " prefetch 192(%1)\n"
22562+ " prefetch 256(%1)\n"
22563 "2: \n"
22564 ".section .fixup, \"ax\"\n"
22565- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22566+ "3: \n"
22567+
22568+#ifdef CONFIG_PAX_KERNEXEC
22569+ " movl %%cr0, %0\n"
22570+ " movl %0, %%eax\n"
22571+ " andl $0xFFFEFFFF, %%eax\n"
22572+ " movl %%eax, %%cr0\n"
22573+#endif
22574+
22575+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22576+
22577+#ifdef CONFIG_PAX_KERNEXEC
22578+ " movl %0, %%cr0\n"
22579+#endif
22580+
22581 " jmp 2b\n"
22582 ".previous\n"
22583- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22584+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22585
22586 for (i = 0; i < 4096/64; i++) {
22587 __asm__ __volatile__ (
22588- "1: prefetch 320(%0)\n"
22589- "2: movq (%0), %%mm0\n"
22590- " movq 8(%0), %%mm1\n"
22591- " movq 16(%0), %%mm2\n"
22592- " movq 24(%0), %%mm3\n"
22593- " movq %%mm0, (%1)\n"
22594- " movq %%mm1, 8(%1)\n"
22595- " movq %%mm2, 16(%1)\n"
22596- " movq %%mm3, 24(%1)\n"
22597- " movq 32(%0), %%mm0\n"
22598- " movq 40(%0), %%mm1\n"
22599- " movq 48(%0), %%mm2\n"
22600- " movq 56(%0), %%mm3\n"
22601- " movq %%mm0, 32(%1)\n"
22602- " movq %%mm1, 40(%1)\n"
22603- " movq %%mm2, 48(%1)\n"
22604- " movq %%mm3, 56(%1)\n"
22605+ "1: prefetch 320(%1)\n"
22606+ "2: movq (%1), %%mm0\n"
22607+ " movq 8(%1), %%mm1\n"
22608+ " movq 16(%1), %%mm2\n"
22609+ " movq 24(%1), %%mm3\n"
22610+ " movq %%mm0, (%2)\n"
22611+ " movq %%mm1, 8(%2)\n"
22612+ " movq %%mm2, 16(%2)\n"
22613+ " movq %%mm3, 24(%2)\n"
22614+ " movq 32(%1), %%mm0\n"
22615+ " movq 40(%1), %%mm1\n"
22616+ " movq 48(%1), %%mm2\n"
22617+ " movq 56(%1), %%mm3\n"
22618+ " movq %%mm0, 32(%2)\n"
22619+ " movq %%mm1, 40(%2)\n"
22620+ " movq %%mm2, 48(%2)\n"
22621+ " movq %%mm3, 56(%2)\n"
22622 ".section .fixup, \"ax\"\n"
22623- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22624+ "3:\n"
22625+
22626+#ifdef CONFIG_PAX_KERNEXEC
22627+ " movl %%cr0, %0\n"
22628+ " movl %0, %%eax\n"
22629+ " andl $0xFFFEFFFF, %%eax\n"
22630+ " movl %%eax, %%cr0\n"
22631+#endif
22632+
22633+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22634+
22635+#ifdef CONFIG_PAX_KERNEXEC
22636+ " movl %0, %%cr0\n"
22637+#endif
22638+
22639 " jmp 2b\n"
22640 ".previous\n"
22641 _ASM_EXTABLE(1b, 3b)
22642- : : "r" (from), "r" (to) : "memory");
22643+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22644
22645 from += 64;
22646 to += 64;
22647diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22648index 69fa106..adda88b 100644
22649--- a/arch/x86/lib/msr-reg.S
22650+++ b/arch/x86/lib/msr-reg.S
22651@@ -3,6 +3,7 @@
22652 #include <asm/dwarf2.h>
22653 #include <asm/asm.h>
22654 #include <asm/msr.h>
22655+#include <asm/alternative-asm.h>
22656
22657 #ifdef CONFIG_X86_64
22658 /*
22659@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22660 CFI_STARTPROC
22661 pushq_cfi %rbx
22662 pushq_cfi %rbp
22663- movq %rdi, %r10 /* Save pointer */
22664+ movq %rdi, %r9 /* Save pointer */
22665 xorl %r11d, %r11d /* Return value */
22666 movl (%rdi), %eax
22667 movl 4(%rdi), %ecx
22668@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22669 movl 28(%rdi), %edi
22670 CFI_REMEMBER_STATE
22671 1: \op
22672-2: movl %eax, (%r10)
22673+2: movl %eax, (%r9)
22674 movl %r11d, %eax /* Return value */
22675- movl %ecx, 4(%r10)
22676- movl %edx, 8(%r10)
22677- movl %ebx, 12(%r10)
22678- movl %ebp, 20(%r10)
22679- movl %esi, 24(%r10)
22680- movl %edi, 28(%r10)
22681+ movl %ecx, 4(%r9)
22682+ movl %edx, 8(%r9)
22683+ movl %ebx, 12(%r9)
22684+ movl %ebp, 20(%r9)
22685+ movl %esi, 24(%r9)
22686+ movl %edi, 28(%r9)
22687 popq_cfi %rbp
22688 popq_cfi %rbx
22689+ pax_force_retaddr
22690 ret
22691 3:
22692 CFI_RESTORE_STATE
22693diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22694index 36b0d15..d381858 100644
22695--- a/arch/x86/lib/putuser.S
22696+++ b/arch/x86/lib/putuser.S
22697@@ -15,7 +15,9 @@
22698 #include <asm/thread_info.h>
22699 #include <asm/errno.h>
22700 #include <asm/asm.h>
22701-
22702+#include <asm/segment.h>
22703+#include <asm/pgtable.h>
22704+#include <asm/alternative-asm.h>
22705
22706 /*
22707 * __put_user_X
22708@@ -29,52 +31,119 @@
22709 * as they get called from within inline assembly.
22710 */
22711
22712-#define ENTER CFI_STARTPROC ; \
22713- GET_THREAD_INFO(%_ASM_BX)
22714-#define EXIT ret ; \
22715+#define ENTER CFI_STARTPROC
22716+#define EXIT pax_force_retaddr; ret ; \
22717 CFI_ENDPROC
22718
22719+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22720+#define _DEST %_ASM_CX,%_ASM_BX
22721+#else
22722+#define _DEST %_ASM_CX
22723+#endif
22724+
22725+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22726+#define __copyuser_seg gs;
22727+#else
22728+#define __copyuser_seg
22729+#endif
22730+
22731 .text
22732 ENTRY(__put_user_1)
22733 ENTER
22734+
22735+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22736+ GET_THREAD_INFO(%_ASM_BX)
22737 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22738 jae bad_put_user
22739-1: movb %al,(%_ASM_CX)
22740+
22741+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22742+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22743+ cmp %_ASM_BX,%_ASM_CX
22744+ jb 1234f
22745+ xor %ebx,%ebx
22746+1234:
22747+#endif
22748+
22749+#endif
22750+
22751+1: __copyuser_seg movb %al,(_DEST)
22752 xor %eax,%eax
22753 EXIT
22754 ENDPROC(__put_user_1)
22755
22756 ENTRY(__put_user_2)
22757 ENTER
22758+
22759+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22760+ GET_THREAD_INFO(%_ASM_BX)
22761 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22762 sub $1,%_ASM_BX
22763 cmp %_ASM_BX,%_ASM_CX
22764 jae bad_put_user
22765-2: movw %ax,(%_ASM_CX)
22766+
22767+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22768+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22769+ cmp %_ASM_BX,%_ASM_CX
22770+ jb 1234f
22771+ xor %ebx,%ebx
22772+1234:
22773+#endif
22774+
22775+#endif
22776+
22777+2: __copyuser_seg movw %ax,(_DEST)
22778 xor %eax,%eax
22779 EXIT
22780 ENDPROC(__put_user_2)
22781
22782 ENTRY(__put_user_4)
22783 ENTER
22784+
22785+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22786+ GET_THREAD_INFO(%_ASM_BX)
22787 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22788 sub $3,%_ASM_BX
22789 cmp %_ASM_BX,%_ASM_CX
22790 jae bad_put_user
22791-3: movl %eax,(%_ASM_CX)
22792+
22793+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22794+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22795+ cmp %_ASM_BX,%_ASM_CX
22796+ jb 1234f
22797+ xor %ebx,%ebx
22798+1234:
22799+#endif
22800+
22801+#endif
22802+
22803+3: __copyuser_seg movl %eax,(_DEST)
22804 xor %eax,%eax
22805 EXIT
22806 ENDPROC(__put_user_4)
22807
22808 ENTRY(__put_user_8)
22809 ENTER
22810+
22811+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22812+ GET_THREAD_INFO(%_ASM_BX)
22813 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22814 sub $7,%_ASM_BX
22815 cmp %_ASM_BX,%_ASM_CX
22816 jae bad_put_user
22817-4: mov %_ASM_AX,(%_ASM_CX)
22818+
22819+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22820+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22821+ cmp %_ASM_BX,%_ASM_CX
22822+ jb 1234f
22823+ xor %ebx,%ebx
22824+1234:
22825+#endif
22826+
22827+#endif
22828+
22829+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22830 #ifdef CONFIG_X86_32
22831-5: movl %edx,4(%_ASM_CX)
22832+5: __copyuser_seg movl %edx,4(_DEST)
22833 #endif
22834 xor %eax,%eax
22835 EXIT
22836diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22837index 05ea55f..f81311a 100644
22838--- a/arch/x86/lib/rwlock_64.S
22839+++ b/arch/x86/lib/rwlock_64.S
22840@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
22841 LOCK_PREFIX
22842 subl $RW_LOCK_BIAS,(%rdi)
22843 jnz __write_lock_failed
22844+ pax_force_retaddr
22845 ret
22846 CFI_ENDPROC
22847 END(__write_lock_failed)
22848@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
22849 LOCK_PREFIX
22850 decl (%rdi)
22851 js __read_lock_failed
22852+ pax_force_retaddr
22853 ret
22854 CFI_ENDPROC
22855 END(__read_lock_failed)
22856diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
22857index 15acecf..f768b10 100644
22858--- a/arch/x86/lib/rwsem_64.S
22859+++ b/arch/x86/lib/rwsem_64.S
22860@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
22861 call rwsem_down_read_failed
22862 popq %rdx
22863 restore_common_regs
22864+ pax_force_retaddr
22865 ret
22866 ENDPROC(call_rwsem_down_read_failed)
22867
22868@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
22869 movq %rax,%rdi
22870 call rwsem_down_write_failed
22871 restore_common_regs
22872+ pax_force_retaddr
22873 ret
22874 ENDPROC(call_rwsem_down_write_failed)
22875
22876@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
22877 movq %rax,%rdi
22878 call rwsem_wake
22879 restore_common_regs
22880-1: ret
22881+1: pax_force_retaddr
22882+ ret
22883 ENDPROC(call_rwsem_wake)
22884
22885 /* Fix up special calling conventions */
22886@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
22887 call rwsem_downgrade_wake
22888 popq %rdx
22889 restore_common_regs
22890+ pax_force_retaddr
22891 ret
22892 ENDPROC(call_rwsem_downgrade_wake)
22893diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22894index bf9a7d5..fb06ab5 100644
22895--- a/arch/x86/lib/thunk_64.S
22896+++ b/arch/x86/lib/thunk_64.S
22897@@ -10,7 +10,8 @@
22898 #include <asm/dwarf2.h>
22899 #include <asm/calling.h>
22900 #include <asm/rwlock.h>
22901-
22902+ #include <asm/alternative-asm.h>
22903+
22904 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22905 .macro thunk name,func
22906 .globl \name
22907@@ -70,6 +71,7 @@
22908 SAVE_ARGS
22909 restore:
22910 RESTORE_ARGS
22911+ pax_force_retaddr
22912 ret
22913 CFI_ENDPROC
22914
22915@@ -77,5 +79,6 @@ restore:
22916 SAVE_ARGS
22917 restore_norax:
22918 RESTORE_ARGS 1
22919+ pax_force_retaddr
22920 ret
22921 CFI_ENDPROC
22922diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22923index 1f118d4..ec4a953 100644
22924--- a/arch/x86/lib/usercopy_32.c
22925+++ b/arch/x86/lib/usercopy_32.c
22926@@ -43,7 +43,7 @@ do { \
22927 __asm__ __volatile__( \
22928 " testl %1,%1\n" \
22929 " jz 2f\n" \
22930- "0: lodsb\n" \
22931+ "0: "__copyuser_seg"lodsb\n" \
22932 " stosb\n" \
22933 " testb %%al,%%al\n" \
22934 " jz 1f\n" \
22935@@ -128,10 +128,12 @@ do { \
22936 int __d0; \
22937 might_fault(); \
22938 __asm__ __volatile__( \
22939+ __COPYUSER_SET_ES \
22940 "0: rep; stosl\n" \
22941 " movl %2,%0\n" \
22942 "1: rep; stosb\n" \
22943 "2:\n" \
22944+ __COPYUSER_RESTORE_ES \
22945 ".section .fixup,\"ax\"\n" \
22946 "3: lea 0(%2,%0,4),%0\n" \
22947 " jmp 2b\n" \
22948@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22949 might_fault();
22950
22951 __asm__ __volatile__(
22952+ __COPYUSER_SET_ES
22953 " testl %0, %0\n"
22954 " jz 3f\n"
22955 " andl %0,%%ecx\n"
22956@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22957 " subl %%ecx,%0\n"
22958 " addl %0,%%eax\n"
22959 "1:\n"
22960+ __COPYUSER_RESTORE_ES
22961 ".section .fixup,\"ax\"\n"
22962 "2: xorl %%eax,%%eax\n"
22963 " jmp 1b\n"
22964@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22965
22966 #ifdef CONFIG_X86_INTEL_USERCOPY
22967 static unsigned long
22968-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22969+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22970 {
22971 int d0, d1;
22972 __asm__ __volatile__(
22973@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22974 " .align 2,0x90\n"
22975 "3: movl 0(%4), %%eax\n"
22976 "4: movl 4(%4), %%edx\n"
22977- "5: movl %%eax, 0(%3)\n"
22978- "6: movl %%edx, 4(%3)\n"
22979+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22980+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22981 "7: movl 8(%4), %%eax\n"
22982 "8: movl 12(%4),%%edx\n"
22983- "9: movl %%eax, 8(%3)\n"
22984- "10: movl %%edx, 12(%3)\n"
22985+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22986+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22987 "11: movl 16(%4), %%eax\n"
22988 "12: movl 20(%4), %%edx\n"
22989- "13: movl %%eax, 16(%3)\n"
22990- "14: movl %%edx, 20(%3)\n"
22991+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22992+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22993 "15: movl 24(%4), %%eax\n"
22994 "16: movl 28(%4), %%edx\n"
22995- "17: movl %%eax, 24(%3)\n"
22996- "18: movl %%edx, 28(%3)\n"
22997+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22998+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22999 "19: movl 32(%4), %%eax\n"
23000 "20: movl 36(%4), %%edx\n"
23001- "21: movl %%eax, 32(%3)\n"
23002- "22: movl %%edx, 36(%3)\n"
23003+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23004+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23005 "23: movl 40(%4), %%eax\n"
23006 "24: movl 44(%4), %%edx\n"
23007- "25: movl %%eax, 40(%3)\n"
23008- "26: movl %%edx, 44(%3)\n"
23009+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23010+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23011 "27: movl 48(%4), %%eax\n"
23012 "28: movl 52(%4), %%edx\n"
23013- "29: movl %%eax, 48(%3)\n"
23014- "30: movl %%edx, 52(%3)\n"
23015+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23016+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23017 "31: movl 56(%4), %%eax\n"
23018 "32: movl 60(%4), %%edx\n"
23019- "33: movl %%eax, 56(%3)\n"
23020- "34: movl %%edx, 60(%3)\n"
23021+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23022+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23023 " addl $-64, %0\n"
23024 " addl $64, %4\n"
23025 " addl $64, %3\n"
23026@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23027 " shrl $2, %0\n"
23028 " andl $3, %%eax\n"
23029 " cld\n"
23030+ __COPYUSER_SET_ES
23031 "99: rep; movsl\n"
23032 "36: movl %%eax, %0\n"
23033 "37: rep; movsb\n"
23034 "100:\n"
23035+ __COPYUSER_RESTORE_ES
23036+ ".section .fixup,\"ax\"\n"
23037+ "101: lea 0(%%eax,%0,4),%0\n"
23038+ " jmp 100b\n"
23039+ ".previous\n"
23040+ ".section __ex_table,\"a\"\n"
23041+ " .align 4\n"
23042+ " .long 1b,100b\n"
23043+ " .long 2b,100b\n"
23044+ " .long 3b,100b\n"
23045+ " .long 4b,100b\n"
23046+ " .long 5b,100b\n"
23047+ " .long 6b,100b\n"
23048+ " .long 7b,100b\n"
23049+ " .long 8b,100b\n"
23050+ " .long 9b,100b\n"
23051+ " .long 10b,100b\n"
23052+ " .long 11b,100b\n"
23053+ " .long 12b,100b\n"
23054+ " .long 13b,100b\n"
23055+ " .long 14b,100b\n"
23056+ " .long 15b,100b\n"
23057+ " .long 16b,100b\n"
23058+ " .long 17b,100b\n"
23059+ " .long 18b,100b\n"
23060+ " .long 19b,100b\n"
23061+ " .long 20b,100b\n"
23062+ " .long 21b,100b\n"
23063+ " .long 22b,100b\n"
23064+ " .long 23b,100b\n"
23065+ " .long 24b,100b\n"
23066+ " .long 25b,100b\n"
23067+ " .long 26b,100b\n"
23068+ " .long 27b,100b\n"
23069+ " .long 28b,100b\n"
23070+ " .long 29b,100b\n"
23071+ " .long 30b,100b\n"
23072+ " .long 31b,100b\n"
23073+ " .long 32b,100b\n"
23074+ " .long 33b,100b\n"
23075+ " .long 34b,100b\n"
23076+ " .long 35b,100b\n"
23077+ " .long 36b,100b\n"
23078+ " .long 37b,100b\n"
23079+ " .long 99b,101b\n"
23080+ ".previous"
23081+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23082+ : "1"(to), "2"(from), "0"(size)
23083+ : "eax", "edx", "memory");
23084+ return size;
23085+}
23086+
23087+static unsigned long
23088+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23089+{
23090+ int d0, d1;
23091+ __asm__ __volatile__(
23092+ " .align 2,0x90\n"
23093+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23094+ " cmpl $67, %0\n"
23095+ " jbe 3f\n"
23096+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23097+ " .align 2,0x90\n"
23098+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23099+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23100+ "5: movl %%eax, 0(%3)\n"
23101+ "6: movl %%edx, 4(%3)\n"
23102+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23103+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23104+ "9: movl %%eax, 8(%3)\n"
23105+ "10: movl %%edx, 12(%3)\n"
23106+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23107+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23108+ "13: movl %%eax, 16(%3)\n"
23109+ "14: movl %%edx, 20(%3)\n"
23110+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23111+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23112+ "17: movl %%eax, 24(%3)\n"
23113+ "18: movl %%edx, 28(%3)\n"
23114+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23115+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23116+ "21: movl %%eax, 32(%3)\n"
23117+ "22: movl %%edx, 36(%3)\n"
23118+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23119+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23120+ "25: movl %%eax, 40(%3)\n"
23121+ "26: movl %%edx, 44(%3)\n"
23122+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23123+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23124+ "29: movl %%eax, 48(%3)\n"
23125+ "30: movl %%edx, 52(%3)\n"
23126+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23127+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23128+ "33: movl %%eax, 56(%3)\n"
23129+ "34: movl %%edx, 60(%3)\n"
23130+ " addl $-64, %0\n"
23131+ " addl $64, %4\n"
23132+ " addl $64, %3\n"
23133+ " cmpl $63, %0\n"
23134+ " ja 1b\n"
23135+ "35: movl %0, %%eax\n"
23136+ " shrl $2, %0\n"
23137+ " andl $3, %%eax\n"
23138+ " cld\n"
23139+ "99: rep; "__copyuser_seg" movsl\n"
23140+ "36: movl %%eax, %0\n"
23141+ "37: rep; "__copyuser_seg" movsb\n"
23142+ "100:\n"
23143 ".section .fixup,\"ax\"\n"
23144 "101: lea 0(%%eax,%0,4),%0\n"
23145 " jmp 100b\n"
23146@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23147 int d0, d1;
23148 __asm__ __volatile__(
23149 " .align 2,0x90\n"
23150- "0: movl 32(%4), %%eax\n"
23151+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23152 " cmpl $67, %0\n"
23153 " jbe 2f\n"
23154- "1: movl 64(%4), %%eax\n"
23155+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23156 " .align 2,0x90\n"
23157- "2: movl 0(%4), %%eax\n"
23158- "21: movl 4(%4), %%edx\n"
23159+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23160+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23161 " movl %%eax, 0(%3)\n"
23162 " movl %%edx, 4(%3)\n"
23163- "3: movl 8(%4), %%eax\n"
23164- "31: movl 12(%4),%%edx\n"
23165+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23166+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23167 " movl %%eax, 8(%3)\n"
23168 " movl %%edx, 12(%3)\n"
23169- "4: movl 16(%4), %%eax\n"
23170- "41: movl 20(%4), %%edx\n"
23171+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23172+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23173 " movl %%eax, 16(%3)\n"
23174 " movl %%edx, 20(%3)\n"
23175- "10: movl 24(%4), %%eax\n"
23176- "51: movl 28(%4), %%edx\n"
23177+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23178+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23179 " movl %%eax, 24(%3)\n"
23180 " movl %%edx, 28(%3)\n"
23181- "11: movl 32(%4), %%eax\n"
23182- "61: movl 36(%4), %%edx\n"
23183+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23184+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23185 " movl %%eax, 32(%3)\n"
23186 " movl %%edx, 36(%3)\n"
23187- "12: movl 40(%4), %%eax\n"
23188- "71: movl 44(%4), %%edx\n"
23189+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23190+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23191 " movl %%eax, 40(%3)\n"
23192 " movl %%edx, 44(%3)\n"
23193- "13: movl 48(%4), %%eax\n"
23194- "81: movl 52(%4), %%edx\n"
23195+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23196+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23197 " movl %%eax, 48(%3)\n"
23198 " movl %%edx, 52(%3)\n"
23199- "14: movl 56(%4), %%eax\n"
23200- "91: movl 60(%4), %%edx\n"
23201+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23202+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23203 " movl %%eax, 56(%3)\n"
23204 " movl %%edx, 60(%3)\n"
23205 " addl $-64, %0\n"
23206@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23207 " shrl $2, %0\n"
23208 " andl $3, %%eax\n"
23209 " cld\n"
23210- "6: rep; movsl\n"
23211+ "6: rep; "__copyuser_seg" movsl\n"
23212 " movl %%eax,%0\n"
23213- "7: rep; movsb\n"
23214+ "7: rep; "__copyuser_seg" movsb\n"
23215 "8:\n"
23216 ".section .fixup,\"ax\"\n"
23217 "9: lea 0(%%eax,%0,4),%0\n"
23218@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23219
23220 __asm__ __volatile__(
23221 " .align 2,0x90\n"
23222- "0: movl 32(%4), %%eax\n"
23223+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23224 " cmpl $67, %0\n"
23225 " jbe 2f\n"
23226- "1: movl 64(%4), %%eax\n"
23227+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23228 " .align 2,0x90\n"
23229- "2: movl 0(%4), %%eax\n"
23230- "21: movl 4(%4), %%edx\n"
23231+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23232+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23233 " movnti %%eax, 0(%3)\n"
23234 " movnti %%edx, 4(%3)\n"
23235- "3: movl 8(%4), %%eax\n"
23236- "31: movl 12(%4),%%edx\n"
23237+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23238+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23239 " movnti %%eax, 8(%3)\n"
23240 " movnti %%edx, 12(%3)\n"
23241- "4: movl 16(%4), %%eax\n"
23242- "41: movl 20(%4), %%edx\n"
23243+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23244+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23245 " movnti %%eax, 16(%3)\n"
23246 " movnti %%edx, 20(%3)\n"
23247- "10: movl 24(%4), %%eax\n"
23248- "51: movl 28(%4), %%edx\n"
23249+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23250+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23251 " movnti %%eax, 24(%3)\n"
23252 " movnti %%edx, 28(%3)\n"
23253- "11: movl 32(%4), %%eax\n"
23254- "61: movl 36(%4), %%edx\n"
23255+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23256+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23257 " movnti %%eax, 32(%3)\n"
23258 " movnti %%edx, 36(%3)\n"
23259- "12: movl 40(%4), %%eax\n"
23260- "71: movl 44(%4), %%edx\n"
23261+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23262+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23263 " movnti %%eax, 40(%3)\n"
23264 " movnti %%edx, 44(%3)\n"
23265- "13: movl 48(%4), %%eax\n"
23266- "81: movl 52(%4), %%edx\n"
23267+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23268+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23269 " movnti %%eax, 48(%3)\n"
23270 " movnti %%edx, 52(%3)\n"
23271- "14: movl 56(%4), %%eax\n"
23272- "91: movl 60(%4), %%edx\n"
23273+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23274+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23275 " movnti %%eax, 56(%3)\n"
23276 " movnti %%edx, 60(%3)\n"
23277 " addl $-64, %0\n"
23278@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23279 " shrl $2, %0\n"
23280 " andl $3, %%eax\n"
23281 " cld\n"
23282- "6: rep; movsl\n"
23283+ "6: rep; "__copyuser_seg" movsl\n"
23284 " movl %%eax,%0\n"
23285- "7: rep; movsb\n"
23286+ "7: rep; "__copyuser_seg" movsb\n"
23287 "8:\n"
23288 ".section .fixup,\"ax\"\n"
23289 "9: lea 0(%%eax,%0,4),%0\n"
23290@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23291
23292 __asm__ __volatile__(
23293 " .align 2,0x90\n"
23294- "0: movl 32(%4), %%eax\n"
23295+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23296 " cmpl $67, %0\n"
23297 " jbe 2f\n"
23298- "1: movl 64(%4), %%eax\n"
23299+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23300 " .align 2,0x90\n"
23301- "2: movl 0(%4), %%eax\n"
23302- "21: movl 4(%4), %%edx\n"
23303+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23304+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23305 " movnti %%eax, 0(%3)\n"
23306 " movnti %%edx, 4(%3)\n"
23307- "3: movl 8(%4), %%eax\n"
23308- "31: movl 12(%4),%%edx\n"
23309+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23310+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23311 " movnti %%eax, 8(%3)\n"
23312 " movnti %%edx, 12(%3)\n"
23313- "4: movl 16(%4), %%eax\n"
23314- "41: movl 20(%4), %%edx\n"
23315+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23316+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23317 " movnti %%eax, 16(%3)\n"
23318 " movnti %%edx, 20(%3)\n"
23319- "10: movl 24(%4), %%eax\n"
23320- "51: movl 28(%4), %%edx\n"
23321+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23322+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23323 " movnti %%eax, 24(%3)\n"
23324 " movnti %%edx, 28(%3)\n"
23325- "11: movl 32(%4), %%eax\n"
23326- "61: movl 36(%4), %%edx\n"
23327+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23328+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23329 " movnti %%eax, 32(%3)\n"
23330 " movnti %%edx, 36(%3)\n"
23331- "12: movl 40(%4), %%eax\n"
23332- "71: movl 44(%4), %%edx\n"
23333+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23334+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23335 " movnti %%eax, 40(%3)\n"
23336 " movnti %%edx, 44(%3)\n"
23337- "13: movl 48(%4), %%eax\n"
23338- "81: movl 52(%4), %%edx\n"
23339+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23340+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23341 " movnti %%eax, 48(%3)\n"
23342 " movnti %%edx, 52(%3)\n"
23343- "14: movl 56(%4), %%eax\n"
23344- "91: movl 60(%4), %%edx\n"
23345+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23346+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23347 " movnti %%eax, 56(%3)\n"
23348 " movnti %%edx, 60(%3)\n"
23349 " addl $-64, %0\n"
23350@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23351 " shrl $2, %0\n"
23352 " andl $3, %%eax\n"
23353 " cld\n"
23354- "6: rep; movsl\n"
23355+ "6: rep; "__copyuser_seg" movsl\n"
23356 " movl %%eax,%0\n"
23357- "7: rep; movsb\n"
23358+ "7: rep; "__copyuser_seg" movsb\n"
23359 "8:\n"
23360 ".section .fixup,\"ax\"\n"
23361 "9: lea 0(%%eax,%0,4),%0\n"
23362@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23363 */
23364 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23365 unsigned long size);
23366-unsigned long __copy_user_intel(void __user *to, const void *from,
23367+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23368+ unsigned long size);
23369+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23370 unsigned long size);
23371 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23372 const void __user *from, unsigned long size);
23373 #endif /* CONFIG_X86_INTEL_USERCOPY */
23374
23375 /* Generic arbitrary sized copy. */
23376-#define __copy_user(to, from, size) \
23377+#define __copy_user(to, from, size, prefix, set, restore) \
23378 do { \
23379 int __d0, __d1, __d2; \
23380 __asm__ __volatile__( \
23381+ set \
23382 " cmp $7,%0\n" \
23383 " jbe 1f\n" \
23384 " movl %1,%0\n" \
23385 " negl %0\n" \
23386 " andl $7,%0\n" \
23387 " subl %0,%3\n" \
23388- "4: rep; movsb\n" \
23389+ "4: rep; "prefix"movsb\n" \
23390 " movl %3,%0\n" \
23391 " shrl $2,%0\n" \
23392 " andl $3,%3\n" \
23393 " .align 2,0x90\n" \
23394- "0: rep; movsl\n" \
23395+ "0: rep; "prefix"movsl\n" \
23396 " movl %3,%0\n" \
23397- "1: rep; movsb\n" \
23398+ "1: rep; "prefix"movsb\n" \
23399 "2:\n" \
23400+ restore \
23401 ".section .fixup,\"ax\"\n" \
23402 "5: addl %3,%0\n" \
23403 " jmp 2b\n" \
23404@@ -682,14 +799,14 @@ do { \
23405 " negl %0\n" \
23406 " andl $7,%0\n" \
23407 " subl %0,%3\n" \
23408- "4: rep; movsb\n" \
23409+ "4: rep; "__copyuser_seg"movsb\n" \
23410 " movl %3,%0\n" \
23411 " shrl $2,%0\n" \
23412 " andl $3,%3\n" \
23413 " .align 2,0x90\n" \
23414- "0: rep; movsl\n" \
23415+ "0: rep; "__copyuser_seg"movsl\n" \
23416 " movl %3,%0\n" \
23417- "1: rep; movsb\n" \
23418+ "1: rep; "__copyuser_seg"movsb\n" \
23419 "2:\n" \
23420 ".section .fixup,\"ax\"\n" \
23421 "5: addl %3,%0\n" \
23422@@ -775,9 +892,9 @@ survive:
23423 }
23424 #endif
23425 if (movsl_is_ok(to, from, n))
23426- __copy_user(to, from, n);
23427+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23428 else
23429- n = __copy_user_intel(to, from, n);
23430+ n = __generic_copy_to_user_intel(to, from, n);
23431 return n;
23432 }
23433 EXPORT_SYMBOL(__copy_to_user_ll);
23434@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23435 unsigned long n)
23436 {
23437 if (movsl_is_ok(to, from, n))
23438- __copy_user(to, from, n);
23439+ __copy_user(to, from, n, __copyuser_seg, "", "");
23440 else
23441- n = __copy_user_intel((void __user *)to,
23442- (const void *)from, n);
23443+ n = __generic_copy_from_user_intel(to, from, n);
23444 return n;
23445 }
23446 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23447@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23448 if (n > 64 && cpu_has_xmm2)
23449 n = __copy_user_intel_nocache(to, from, n);
23450 else
23451- __copy_user(to, from, n);
23452+ __copy_user(to, from, n, __copyuser_seg, "", "");
23453 #else
23454- __copy_user(to, from, n);
23455+ __copy_user(to, from, n, __copyuser_seg, "", "");
23456 #endif
23457 return n;
23458 }
23459 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23460
23461-/**
23462- * copy_to_user: - Copy a block of data into user space.
23463- * @to: Destination address, in user space.
23464- * @from: Source address, in kernel space.
23465- * @n: Number of bytes to copy.
23466- *
23467- * Context: User context only. This function may sleep.
23468- *
23469- * Copy data from kernel space to user space.
23470- *
23471- * Returns number of bytes that could not be copied.
23472- * On success, this will be zero.
23473- */
23474-unsigned long
23475-copy_to_user(void __user *to, const void *from, unsigned long n)
23476+#ifdef CONFIG_PAX_MEMORY_UDEREF
23477+void __set_fs(mm_segment_t x)
23478 {
23479- if (access_ok(VERIFY_WRITE, to, n))
23480- n = __copy_to_user(to, from, n);
23481- return n;
23482+ switch (x.seg) {
23483+ case 0:
23484+ loadsegment(gs, 0);
23485+ break;
23486+ case TASK_SIZE_MAX:
23487+ loadsegment(gs, __USER_DS);
23488+ break;
23489+ case -1UL:
23490+ loadsegment(gs, __KERNEL_DS);
23491+ break;
23492+ default:
23493+ BUG();
23494+ }
23495+ return;
23496 }
23497-EXPORT_SYMBOL(copy_to_user);
23498+EXPORT_SYMBOL(__set_fs);
23499
23500-/**
23501- * copy_from_user: - Copy a block of data from user space.
23502- * @to: Destination address, in kernel space.
23503- * @from: Source address, in user space.
23504- * @n: Number of bytes to copy.
23505- *
23506- * Context: User context only. This function may sleep.
23507- *
23508- * Copy data from user space to kernel space.
23509- *
23510- * Returns number of bytes that could not be copied.
23511- * On success, this will be zero.
23512- *
23513- * If some data could not be copied, this function will pad the copied
23514- * data to the requested size using zero bytes.
23515- */
23516-unsigned long
23517-copy_from_user(void *to, const void __user *from, unsigned long n)
23518+void set_fs(mm_segment_t x)
23519 {
23520- if (access_ok(VERIFY_READ, from, n))
23521- n = __copy_from_user(to, from, n);
23522- else
23523- memset(to, 0, n);
23524- return n;
23525+ current_thread_info()->addr_limit = x;
23526+ __set_fs(x);
23527 }
23528-EXPORT_SYMBOL(copy_from_user);
23529+EXPORT_SYMBOL(set_fs);
23530+#endif
23531diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23532index b7c2849..5ef0f95 100644
23533--- a/arch/x86/lib/usercopy_64.c
23534+++ b/arch/x86/lib/usercopy_64.c
23535@@ -42,6 +42,12 @@ long
23536 __strncpy_from_user(char *dst, const char __user *src, long count)
23537 {
23538 long res;
23539+
23540+#ifdef CONFIG_PAX_MEMORY_UDEREF
23541+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23542+ src += PAX_USER_SHADOW_BASE;
23543+#endif
23544+
23545 __do_strncpy_from_user(dst, src, count, res);
23546 return res;
23547 }
23548@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23549 {
23550 long __d0;
23551 might_fault();
23552+
23553+#ifdef CONFIG_PAX_MEMORY_UDEREF
23554+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23555+ addr += PAX_USER_SHADOW_BASE;
23556+#endif
23557+
23558 /* no memory constraint because it doesn't change any memory gcc knows
23559 about */
23560 asm volatile(
23561@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
23562
23563 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23564 {
23565- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23566- return copy_user_generic((__force void *)to, (__force void *)from, len);
23567- }
23568- return len;
23569+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23570+
23571+#ifdef CONFIG_PAX_MEMORY_UDEREF
23572+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23573+ to += PAX_USER_SHADOW_BASE;
23574+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23575+ from += PAX_USER_SHADOW_BASE;
23576+#endif
23577+
23578+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23579+ }
23580+ return len;
23581 }
23582 EXPORT_SYMBOL(copy_in_user);
23583
23584@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23585 * it is not necessary to optimize tail handling.
23586 */
23587 unsigned long
23588-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23589+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
23590 {
23591 char c;
23592 unsigned zero_len;
23593diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23594index 61b41ca..5fef66a 100644
23595--- a/arch/x86/mm/extable.c
23596+++ b/arch/x86/mm/extable.c
23597@@ -1,14 +1,71 @@
23598 #include <linux/module.h>
23599 #include <linux/spinlock.h>
23600+#include <linux/sort.h>
23601 #include <asm/uaccess.h>
23602+#include <asm/pgtable.h>
23603
23604+/*
23605+ * The exception table needs to be sorted so that the binary
23606+ * search that we use to find entries in it works properly.
23607+ * This is used both for the kernel exception table and for
23608+ * the exception tables of modules that get loaded.
23609+ */
23610+static int cmp_ex(const void *a, const void *b)
23611+{
23612+ const struct exception_table_entry *x = a, *y = b;
23613+
23614+ /* avoid overflow */
23615+ if (x->insn > y->insn)
23616+ return 1;
23617+ if (x->insn < y->insn)
23618+ return -1;
23619+ return 0;
23620+}
23621+
23622+static void swap_ex(void *a, void *b, int size)
23623+{
23624+ struct exception_table_entry t, *x = a, *y = b;
23625+
23626+ t = *x;
23627+
23628+ pax_open_kernel();
23629+ *x = *y;
23630+ *y = t;
23631+ pax_close_kernel();
23632+}
23633+
23634+void sort_extable(struct exception_table_entry *start,
23635+ struct exception_table_entry *finish)
23636+{
23637+ sort(start, finish - start, sizeof(struct exception_table_entry),
23638+ cmp_ex, swap_ex);
23639+}
23640+
23641+#ifdef CONFIG_MODULES
23642+/*
23643+ * If the exception table is sorted, any referring to the module init
23644+ * will be at the beginning or the end.
23645+ */
23646+void trim_init_extable(struct module *m)
23647+{
23648+ /*trim the beginning*/
23649+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23650+ m->extable++;
23651+ m->num_exentries--;
23652+ }
23653+ /*trim the end*/
23654+ while (m->num_exentries &&
23655+ within_module_init(m->extable[m->num_exentries-1].insn, m))
23656+ m->num_exentries--;
23657+}
23658+#endif /* CONFIG_MODULES */
23659
23660 int fixup_exception(struct pt_regs *regs)
23661 {
23662 const struct exception_table_entry *fixup;
23663
23664 #ifdef CONFIG_PNPBIOS
23665- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23666+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23667 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23668 extern u32 pnp_bios_is_utter_crap;
23669 pnp_bios_is_utter_crap = 1;
23670diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23671index 8ac0d76..3f191dc 100644
23672--- a/arch/x86/mm/fault.c
23673+++ b/arch/x86/mm/fault.c
23674@@ -11,10 +11,19 @@
23675 #include <linux/kprobes.h> /* __kprobes, ... */
23676 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23677 #include <linux/perf_event.h> /* perf_sw_event */
23678+#include <linux/unistd.h>
23679+#include <linux/compiler.h>
23680
23681 #include <asm/traps.h> /* dotraplinkage, ... */
23682 #include <asm/pgalloc.h> /* pgd_*(), ... */
23683 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23684+#include <asm/vsyscall.h>
23685+#include <asm/tlbflush.h>
23686+
23687+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23688+#include <asm/stacktrace.h>
23689+#include "../kernel/dumpstack.h"
23690+#endif
23691
23692 /*
23693 * Page fault error code bits:
23694@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23695 int ret = 0;
23696
23697 /* kprobe_running() needs smp_processor_id() */
23698- if (kprobes_built_in() && !user_mode_vm(regs)) {
23699+ if (kprobes_built_in() && !user_mode(regs)) {
23700 preempt_disable();
23701 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23702 ret = 1;
23703@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23704 return !instr_lo || (instr_lo>>1) == 1;
23705 case 0x00:
23706 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23707- if (probe_kernel_address(instr, opcode))
23708+ if (user_mode(regs)) {
23709+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23710+ return 0;
23711+ } else if (probe_kernel_address(instr, opcode))
23712 return 0;
23713
23714 *prefetch = (instr_lo == 0xF) &&
23715@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23716 while (instr < max_instr) {
23717 unsigned char opcode;
23718
23719- if (probe_kernel_address(instr, opcode))
23720+ if (user_mode(regs)) {
23721+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23722+ break;
23723+ } else if (probe_kernel_address(instr, opcode))
23724 break;
23725
23726 instr++;
23727@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23728 force_sig_info(si_signo, &info, tsk);
23729 }
23730
23731+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23732+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23733+#endif
23734+
23735+#ifdef CONFIG_PAX_EMUTRAMP
23736+static int pax_handle_fetch_fault(struct pt_regs *regs);
23737+#endif
23738+
23739+#ifdef CONFIG_PAX_PAGEEXEC
23740+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23741+{
23742+ pgd_t *pgd;
23743+ pud_t *pud;
23744+ pmd_t *pmd;
23745+
23746+ pgd = pgd_offset(mm, address);
23747+ if (!pgd_present(*pgd))
23748+ return NULL;
23749+ pud = pud_offset(pgd, address);
23750+ if (!pud_present(*pud))
23751+ return NULL;
23752+ pmd = pmd_offset(pud, address);
23753+ if (!pmd_present(*pmd))
23754+ return NULL;
23755+ return pmd;
23756+}
23757+#endif
23758+
23759 DEFINE_SPINLOCK(pgd_lock);
23760 LIST_HEAD(pgd_list);
23761
23762@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23763 address += PMD_SIZE) {
23764
23765 unsigned long flags;
23766+
23767+#ifdef CONFIG_PAX_PER_CPU_PGD
23768+ unsigned long cpu;
23769+#else
23770 struct page *page;
23771+#endif
23772
23773 spin_lock_irqsave(&pgd_lock, flags);
23774+
23775+#ifdef CONFIG_PAX_PER_CPU_PGD
23776+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23777+ pgd_t *pgd = get_cpu_pgd(cpu);
23778+#else
23779 list_for_each_entry(page, &pgd_list, lru) {
23780- if (!vmalloc_sync_one(page_address(page), address))
23781+ pgd_t *pgd = page_address(page);
23782+#endif
23783+
23784+ if (!vmalloc_sync_one(pgd, address))
23785 break;
23786 }
23787 spin_unlock_irqrestore(&pgd_lock, flags);
23788@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23789 * an interrupt in the middle of a task switch..
23790 */
23791 pgd_paddr = read_cr3();
23792+
23793+#ifdef CONFIG_PAX_PER_CPU_PGD
23794+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23795+#endif
23796+
23797 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23798 if (!pmd_k)
23799 return -1;
23800@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23801
23802 const pgd_t *pgd_ref = pgd_offset_k(address);
23803 unsigned long flags;
23804+
23805+#ifdef CONFIG_PAX_PER_CPU_PGD
23806+ unsigned long cpu;
23807+#else
23808 struct page *page;
23809+#endif
23810
23811 if (pgd_none(*pgd_ref))
23812 continue;
23813
23814 spin_lock_irqsave(&pgd_lock, flags);
23815+
23816+#ifdef CONFIG_PAX_PER_CPU_PGD
23817+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23818+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
23819+#else
23820 list_for_each_entry(page, &pgd_list, lru) {
23821 pgd_t *pgd;
23822 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23823+#endif
23824+
23825 if (pgd_none(*pgd))
23826 set_pgd(pgd, *pgd_ref);
23827 else
23828@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23829 * happen within a race in page table update. In the later
23830 * case just flush:
23831 */
23832+
23833+#ifdef CONFIG_PAX_PER_CPU_PGD
23834+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23835+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23836+#else
23837 pgd = pgd_offset(current->active_mm, address);
23838+#endif
23839+
23840 pgd_ref = pgd_offset_k(address);
23841 if (pgd_none(*pgd_ref))
23842 return -1;
23843@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23844 static int is_errata100(struct pt_regs *regs, unsigned long address)
23845 {
23846 #ifdef CONFIG_X86_64
23847- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23848+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23849 return 1;
23850 #endif
23851 return 0;
23852@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23853 }
23854
23855 static const char nx_warning[] = KERN_CRIT
23856-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23857+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23858
23859 static void
23860 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23861@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23862 if (!oops_may_print())
23863 return;
23864
23865- if (error_code & PF_INSTR) {
23866+ if (nx_enabled && (error_code & PF_INSTR)) {
23867 unsigned int level;
23868
23869 pte_t *pte = lookup_address(address, &level);
23870
23871 if (pte && pte_present(*pte) && !pte_exec(*pte))
23872- printk(nx_warning, current_uid());
23873+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23874 }
23875
23876+#ifdef CONFIG_PAX_KERNEXEC
23877+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23878+ if (current->signal->curr_ip)
23879+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23880+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23881+ else
23882+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23883+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23884+ }
23885+#endif
23886+
23887 printk(KERN_ALERT "BUG: unable to handle kernel ");
23888 if (address < PAGE_SIZE)
23889 printk(KERN_CONT "NULL pointer dereference");
23890@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23891 {
23892 struct task_struct *tsk = current;
23893
23894+#ifdef CONFIG_X86_64
23895+ struct mm_struct *mm = tsk->mm;
23896+
23897+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
23898+ if (regs->ip == (unsigned long)vgettimeofday) {
23899+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
23900+ return;
23901+ } else if (regs->ip == (unsigned long)vtime) {
23902+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
23903+ return;
23904+ } else if (regs->ip == (unsigned long)vgetcpu) {
23905+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
23906+ return;
23907+ }
23908+ }
23909+#endif
23910+
23911 /* User mode accesses just cause a SIGSEGV */
23912 if (error_code & PF_USER) {
23913 /*
23914@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23915 if (is_errata100(regs, address))
23916 return;
23917
23918+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23919+ if (pax_is_fetch_fault(regs, error_code, address)) {
23920+
23921+#ifdef CONFIG_PAX_EMUTRAMP
23922+ switch (pax_handle_fetch_fault(regs)) {
23923+ case 2:
23924+ return;
23925+ }
23926+#endif
23927+
23928+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23929+ do_group_exit(SIGKILL);
23930+ }
23931+#endif
23932+
23933 if (unlikely(show_unhandled_signals))
23934 show_signal_msg(regs, error_code, address, tsk);
23935
23936@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23937 if (fault & VM_FAULT_HWPOISON) {
23938 printk(KERN_ERR
23939 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23940- tsk->comm, tsk->pid, address);
23941+ tsk->comm, task_pid_nr(tsk), address);
23942 code = BUS_MCEERR_AR;
23943 }
23944 #endif
23945@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23946 return 1;
23947 }
23948
23949+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23950+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23951+{
23952+ pte_t *pte;
23953+ pmd_t *pmd;
23954+ spinlock_t *ptl;
23955+ unsigned char pte_mask;
23956+
23957+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23958+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23959+ return 0;
23960+
23961+ /* PaX: it's our fault, let's handle it if we can */
23962+
23963+ /* PaX: take a look at read faults before acquiring any locks */
23964+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23965+ /* instruction fetch attempt from a protected page in user mode */
23966+ up_read(&mm->mmap_sem);
23967+
23968+#ifdef CONFIG_PAX_EMUTRAMP
23969+ switch (pax_handle_fetch_fault(regs)) {
23970+ case 2:
23971+ return 1;
23972+ }
23973+#endif
23974+
23975+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23976+ do_group_exit(SIGKILL);
23977+ }
23978+
23979+ pmd = pax_get_pmd(mm, address);
23980+ if (unlikely(!pmd))
23981+ return 0;
23982+
23983+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23984+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23985+ pte_unmap_unlock(pte, ptl);
23986+ return 0;
23987+ }
23988+
23989+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23990+ /* write attempt to a protected page in user mode */
23991+ pte_unmap_unlock(pte, ptl);
23992+ return 0;
23993+ }
23994+
23995+#ifdef CONFIG_SMP
23996+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23997+#else
23998+ if (likely(address > get_limit(regs->cs)))
23999+#endif
24000+ {
24001+ set_pte(pte, pte_mkread(*pte));
24002+ __flush_tlb_one(address);
24003+ pte_unmap_unlock(pte, ptl);
24004+ up_read(&mm->mmap_sem);
24005+ return 1;
24006+ }
24007+
24008+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24009+
24010+ /*
24011+ * PaX: fill DTLB with user rights and retry
24012+ */
24013+ __asm__ __volatile__ (
24014+ "orb %2,(%1)\n"
24015+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24016+/*
24017+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24018+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24019+ * page fault when examined during a TLB load attempt. this is true not only
24020+ * for PTEs holding a non-present entry but also present entries that will
24021+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24022+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24023+ * for our target pages since their PTEs are simply not in the TLBs at all.
24024+
24025+ * the best thing in omitting it is that we gain around 15-20% speed in the
24026+ * fast path of the page fault handler and can get rid of tracing since we
24027+ * can no longer flush unintended entries.
24028+ */
24029+ "invlpg (%0)\n"
24030+#endif
24031+ __copyuser_seg"testb $0,(%0)\n"
24032+ "xorb %3,(%1)\n"
24033+ :
24034+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24035+ : "memory", "cc");
24036+ pte_unmap_unlock(pte, ptl);
24037+ up_read(&mm->mmap_sem);
24038+ return 1;
24039+}
24040+#endif
24041+
24042 /*
24043 * Handle a spurious fault caused by a stale TLB entry.
24044 *
24045@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24046 static inline int
24047 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24048 {
24049+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24050+ return 1;
24051+
24052 if (write) {
24053 /* write, present and write, not present: */
24054 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24055@@ -956,17 +1175,31 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24056 {
24057 struct vm_area_struct *vma;
24058 struct task_struct *tsk;
24059- unsigned long address;
24060 struct mm_struct *mm;
24061 int write;
24062 int fault;
24063
24064+ /* Get the faulting address: */
24065+ unsigned long address = read_cr2();
24066+
24067+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24068+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24069+ if (!search_exception_tables(regs->ip)) {
24070+ bad_area_nosemaphore(regs, error_code, address);
24071+ return;
24072+ }
24073+ if (address < PAX_USER_SHADOW_BASE) {
24074+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24075+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24076+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24077+ } else
24078+ address -= PAX_USER_SHADOW_BASE;
24079+ }
24080+#endif
24081+
24082 tsk = current;
24083 mm = tsk->mm;
24084
24085- /* Get the faulting address: */
24086- address = read_cr2();
24087-
24088 /*
24089 * Detect and handle instructions that would cause a page fault for
24090 * both a tracked kernel page and a userspace page.
24091@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24092 * User-mode registers count as a user access even for any
24093 * potential system fault or CPU buglet:
24094 */
24095- if (user_mode_vm(regs)) {
24096+ if (user_mode(regs)) {
24097 local_irq_enable();
24098 error_code |= PF_USER;
24099 } else {
24100@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24101 might_sleep();
24102 }
24103
24104+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24105+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24106+ return;
24107+#endif
24108+
24109 vma = find_vma(mm, address);
24110 if (unlikely(!vma)) {
24111 bad_area(regs, error_code, address);
24112@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24113 bad_area(regs, error_code, address);
24114 return;
24115 }
24116- if (error_code & PF_USER) {
24117- /*
24118- * Accessing the stack below %sp is always a bug.
24119- * The large cushion allows instructions like enter
24120- * and pusha to work. ("enter $65535, $31" pushes
24121- * 32 pointers and then decrements %sp by 65535.)
24122- */
24123- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24124- bad_area(regs, error_code, address);
24125- return;
24126- }
24127+ /*
24128+ * Accessing the stack below %sp is always a bug.
24129+ * The large cushion allows instructions like enter
24130+ * and pusha to work. ("enter $65535, $31" pushes
24131+ * 32 pointers and then decrements %sp by 65535.)
24132+ */
24133+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24134+ bad_area(regs, error_code, address);
24135+ return;
24136 }
24137+
24138+#ifdef CONFIG_PAX_SEGMEXEC
24139+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24140+ bad_area(regs, error_code, address);
24141+ return;
24142+ }
24143+#endif
24144+
24145 if (unlikely(expand_stack(vma, address))) {
24146 bad_area(regs, error_code, address);
24147 return;
24148@@ -1146,3 +1390,240 @@ good_area:
24149
24150 up_read(&mm->mmap_sem);
24151 }
24152+
24153+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24154+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24155+{
24156+ struct mm_struct *mm = current->mm;
24157+ unsigned long ip = regs->ip;
24158+
24159+ if (v8086_mode(regs))
24160+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24161+
24162+#ifdef CONFIG_PAX_PAGEEXEC
24163+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24164+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24165+ return true;
24166+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24167+ return true;
24168+ return false;
24169+ }
24170+#endif
24171+
24172+#ifdef CONFIG_PAX_SEGMEXEC
24173+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24174+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24175+ return true;
24176+ return false;
24177+ }
24178+#endif
24179+
24180+ return false;
24181+}
24182+#endif
24183+
24184+#ifdef CONFIG_PAX_EMUTRAMP
24185+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24186+{
24187+ int err;
24188+
24189+ do { /* PaX: gcc trampoline emulation #1 */
24190+ unsigned char mov1, mov2;
24191+ unsigned short jmp;
24192+ unsigned int addr1, addr2;
24193+
24194+#ifdef CONFIG_X86_64
24195+ if ((regs->ip + 11) >> 32)
24196+ break;
24197+#endif
24198+
24199+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24200+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24201+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24202+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24203+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24204+
24205+ if (err)
24206+ break;
24207+
24208+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24209+ regs->cx = addr1;
24210+ regs->ax = addr2;
24211+ regs->ip = addr2;
24212+ return 2;
24213+ }
24214+ } while (0);
24215+
24216+ do { /* PaX: gcc trampoline emulation #2 */
24217+ unsigned char mov, jmp;
24218+ unsigned int addr1, addr2;
24219+
24220+#ifdef CONFIG_X86_64
24221+ if ((regs->ip + 9) >> 32)
24222+ break;
24223+#endif
24224+
24225+ err = get_user(mov, (unsigned char __user *)regs->ip);
24226+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24227+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24228+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24229+
24230+ if (err)
24231+ break;
24232+
24233+ if (mov == 0xB9 && jmp == 0xE9) {
24234+ regs->cx = addr1;
24235+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24236+ return 2;
24237+ }
24238+ } while (0);
24239+
24240+ return 1; /* PaX in action */
24241+}
24242+
24243+#ifdef CONFIG_X86_64
24244+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24245+{
24246+ int err;
24247+
24248+ do { /* PaX: gcc trampoline emulation #1 */
24249+ unsigned short mov1, mov2, jmp1;
24250+ unsigned char jmp2;
24251+ unsigned int addr1;
24252+ unsigned long addr2;
24253+
24254+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24255+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24256+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24257+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24258+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24259+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24260+
24261+ if (err)
24262+ break;
24263+
24264+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24265+ regs->r11 = addr1;
24266+ regs->r10 = addr2;
24267+ regs->ip = addr1;
24268+ return 2;
24269+ }
24270+ } while (0);
24271+
24272+ do { /* PaX: gcc trampoline emulation #2 */
24273+ unsigned short mov1, mov2, jmp1;
24274+ unsigned char jmp2;
24275+ unsigned long addr1, addr2;
24276+
24277+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24278+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24279+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24280+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24281+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24282+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24283+
24284+ if (err)
24285+ break;
24286+
24287+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24288+ regs->r11 = addr1;
24289+ regs->r10 = addr2;
24290+ regs->ip = addr1;
24291+ return 2;
24292+ }
24293+ } while (0);
24294+
24295+ return 1; /* PaX in action */
24296+}
24297+#endif
24298+
24299+/*
24300+ * PaX: decide what to do with offenders (regs->ip = fault address)
24301+ *
24302+ * returns 1 when task should be killed
24303+ * 2 when gcc trampoline was detected
24304+ */
24305+static int pax_handle_fetch_fault(struct pt_regs *regs)
24306+{
24307+ if (v8086_mode(regs))
24308+ return 1;
24309+
24310+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24311+ return 1;
24312+
24313+#ifdef CONFIG_X86_32
24314+ return pax_handle_fetch_fault_32(regs);
24315+#else
24316+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24317+ return pax_handle_fetch_fault_32(regs);
24318+ else
24319+ return pax_handle_fetch_fault_64(regs);
24320+#endif
24321+}
24322+#endif
24323+
24324+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24325+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24326+{
24327+ long i;
24328+
24329+ printk(KERN_ERR "PAX: bytes at PC: ");
24330+ for (i = 0; i < 20; i++) {
24331+ unsigned char c;
24332+ if (get_user(c, (unsigned char __force_user *)pc+i))
24333+ printk(KERN_CONT "?? ");
24334+ else
24335+ printk(KERN_CONT "%02x ", c);
24336+ }
24337+ printk("\n");
24338+
24339+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24340+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24341+ unsigned long c;
24342+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24343+#ifdef CONFIG_X86_32
24344+ printk(KERN_CONT "???????? ");
24345+#else
24346+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24347+ printk(KERN_CONT "???????? ???????? ");
24348+ else
24349+ printk(KERN_CONT "???????????????? ");
24350+#endif
24351+ } else {
24352+#ifdef CONFIG_X86_64
24353+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24354+ printk(KERN_CONT "%08x ", (unsigned int)c);
24355+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24356+ } else
24357+#endif
24358+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24359+ }
24360+ }
24361+ printk("\n");
24362+}
24363+#endif
24364+
24365+/**
24366+ * probe_kernel_write(): safely attempt to write to a location
24367+ * @dst: address to write to
24368+ * @src: pointer to the data that shall be written
24369+ * @size: size of the data chunk
24370+ *
24371+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24372+ * happens, handle that and return -EFAULT.
24373+ */
24374+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24375+{
24376+ long ret;
24377+ mm_segment_t old_fs = get_fs();
24378+
24379+ set_fs(KERNEL_DS);
24380+ pagefault_disable();
24381+ pax_open_kernel();
24382+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24383+ pax_close_kernel();
24384+ pagefault_enable();
24385+ set_fs(old_fs);
24386+
24387+ return ret ? -EFAULT : 0;
24388+}
24389diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24390index 71da1bc..7a16bf4 100644
24391--- a/arch/x86/mm/gup.c
24392+++ b/arch/x86/mm/gup.c
24393@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24394 addr = start;
24395 len = (unsigned long) nr_pages << PAGE_SHIFT;
24396 end = start + len;
24397- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24398+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24399 (void __user *)start, len)))
24400 return 0;
24401
24402diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24403index 63a6ba6..79abd7a 100644
24404--- a/arch/x86/mm/highmem_32.c
24405+++ b/arch/x86/mm/highmem_32.c
24406@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24407 idx = type + KM_TYPE_NR*smp_processor_id();
24408 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24409 BUG_ON(!pte_none(*(kmap_pte-idx)));
24410+
24411+ pax_open_kernel();
24412 set_pte(kmap_pte-idx, mk_pte(page, prot));
24413+ pax_close_kernel();
24414
24415 return (void *)vaddr;
24416 }
24417diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24418index f46c340..6ff9a26 100644
24419--- a/arch/x86/mm/hugetlbpage.c
24420+++ b/arch/x86/mm/hugetlbpage.c
24421@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24422 struct hstate *h = hstate_file(file);
24423 struct mm_struct *mm = current->mm;
24424 struct vm_area_struct *vma;
24425- unsigned long start_addr;
24426+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24427+
24428+#ifdef CONFIG_PAX_SEGMEXEC
24429+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24430+ pax_task_size = SEGMEXEC_TASK_SIZE;
24431+#endif
24432+
24433+ pax_task_size -= PAGE_SIZE;
24434
24435 if (len > mm->cached_hole_size) {
24436- start_addr = mm->free_area_cache;
24437+ start_addr = mm->free_area_cache;
24438 } else {
24439- start_addr = TASK_UNMAPPED_BASE;
24440- mm->cached_hole_size = 0;
24441+ start_addr = mm->mmap_base;
24442+ mm->cached_hole_size = 0;
24443 }
24444
24445 full_search:
24446@@ -281,26 +288,27 @@ full_search:
24447
24448 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24449 /* At this point: (!vma || addr < vma->vm_end). */
24450- if (TASK_SIZE - len < addr) {
24451+ if (pax_task_size - len < addr) {
24452 /*
24453 * Start a new search - just in case we missed
24454 * some holes.
24455 */
24456- if (start_addr != TASK_UNMAPPED_BASE) {
24457- start_addr = TASK_UNMAPPED_BASE;
24458+ if (start_addr != mm->mmap_base) {
24459+ start_addr = mm->mmap_base;
24460 mm->cached_hole_size = 0;
24461 goto full_search;
24462 }
24463 return -ENOMEM;
24464 }
24465- if (!vma || addr + len <= vma->vm_start) {
24466- mm->free_area_cache = addr + len;
24467- return addr;
24468- }
24469+ if (check_heap_stack_gap(vma, addr, len))
24470+ break;
24471 if (addr + mm->cached_hole_size < vma->vm_start)
24472 mm->cached_hole_size = vma->vm_start - addr;
24473 addr = ALIGN(vma->vm_end, huge_page_size(h));
24474 }
24475+
24476+ mm->free_area_cache = addr + len;
24477+ return addr;
24478 }
24479
24480 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24481@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24482 {
24483 struct hstate *h = hstate_file(file);
24484 struct mm_struct *mm = current->mm;
24485- struct vm_area_struct *vma, *prev_vma;
24486- unsigned long base = mm->mmap_base, addr = addr0;
24487+ struct vm_area_struct *vma;
24488+ unsigned long base = mm->mmap_base, addr;
24489 unsigned long largest_hole = mm->cached_hole_size;
24490- int first_time = 1;
24491
24492 /* don't allow allocations above current base */
24493 if (mm->free_area_cache > base)
24494@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24495 largest_hole = 0;
24496 mm->free_area_cache = base;
24497 }
24498-try_again:
24499+
24500 /* make sure it can fit in the remaining address space */
24501 if (mm->free_area_cache < len)
24502 goto fail;
24503
24504 /* either no address requested or cant fit in requested address hole */
24505- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24506+ addr = (mm->free_area_cache - len);
24507 do {
24508+ addr &= huge_page_mask(h);
24509+ vma = find_vma(mm, addr);
24510 /*
24511 * Lookup failure means no vma is above this address,
24512 * i.e. return with success:
24513- */
24514- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24515- return addr;
24516-
24517- /*
24518 * new region fits between prev_vma->vm_end and
24519 * vma->vm_start, use it:
24520 */
24521- if (addr + len <= vma->vm_start &&
24522- (!prev_vma || (addr >= prev_vma->vm_end))) {
24523+ if (check_heap_stack_gap(vma, addr, len)) {
24524 /* remember the address as a hint for next time */
24525- mm->cached_hole_size = largest_hole;
24526- return (mm->free_area_cache = addr);
24527- } else {
24528- /* pull free_area_cache down to the first hole */
24529- if (mm->free_area_cache == vma->vm_end) {
24530- mm->free_area_cache = vma->vm_start;
24531- mm->cached_hole_size = largest_hole;
24532- }
24533+ mm->cached_hole_size = largest_hole;
24534+ return (mm->free_area_cache = addr);
24535+ }
24536+ /* pull free_area_cache down to the first hole */
24537+ if (mm->free_area_cache == vma->vm_end) {
24538+ mm->free_area_cache = vma->vm_start;
24539+ mm->cached_hole_size = largest_hole;
24540 }
24541
24542 /* remember the largest hole we saw so far */
24543 if (addr + largest_hole < vma->vm_start)
24544- largest_hole = vma->vm_start - addr;
24545+ largest_hole = vma->vm_start - addr;
24546
24547 /* try just below the current vma->vm_start */
24548- addr = (vma->vm_start - len) & huge_page_mask(h);
24549- } while (len <= vma->vm_start);
24550+ addr = skip_heap_stack_gap(vma, len);
24551+ } while (!IS_ERR_VALUE(addr));
24552
24553 fail:
24554 /*
24555- * if hint left us with no space for the requested
24556- * mapping then try again:
24557- */
24558- if (first_time) {
24559- mm->free_area_cache = base;
24560- largest_hole = 0;
24561- first_time = 0;
24562- goto try_again;
24563- }
24564- /*
24565 * A failed mmap() very likely causes application failure,
24566 * so fall back to the bottom-up function here. This scenario
24567 * can happen with large stack limits and large mmap()
24568 * allocations.
24569 */
24570- mm->free_area_cache = TASK_UNMAPPED_BASE;
24571+
24572+#ifdef CONFIG_PAX_SEGMEXEC
24573+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24574+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24575+ else
24576+#endif
24577+
24578+ mm->mmap_base = TASK_UNMAPPED_BASE;
24579+
24580+#ifdef CONFIG_PAX_RANDMMAP
24581+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24582+ mm->mmap_base += mm->delta_mmap;
24583+#endif
24584+
24585+ mm->free_area_cache = mm->mmap_base;
24586 mm->cached_hole_size = ~0UL;
24587 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24588 len, pgoff, flags);
24589@@ -387,6 +393,7 @@ fail:
24590 /*
24591 * Restore the topdown base:
24592 */
24593+ mm->mmap_base = base;
24594 mm->free_area_cache = base;
24595 mm->cached_hole_size = ~0UL;
24596
24597@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24598 struct hstate *h = hstate_file(file);
24599 struct mm_struct *mm = current->mm;
24600 struct vm_area_struct *vma;
24601+ unsigned long pax_task_size = TASK_SIZE;
24602
24603 if (len & ~huge_page_mask(h))
24604 return -EINVAL;
24605- if (len > TASK_SIZE)
24606+
24607+#ifdef CONFIG_PAX_SEGMEXEC
24608+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24609+ pax_task_size = SEGMEXEC_TASK_SIZE;
24610+#endif
24611+
24612+ pax_task_size -= PAGE_SIZE;
24613+
24614+ if (len > pax_task_size)
24615 return -ENOMEM;
24616
24617 if (flags & MAP_FIXED) {
24618@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24619 if (addr) {
24620 addr = ALIGN(addr, huge_page_size(h));
24621 vma = find_vma(mm, addr);
24622- if (TASK_SIZE - len >= addr &&
24623- (!vma || addr + len <= vma->vm_start))
24624+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24625 return addr;
24626 }
24627 if (mm->get_unmapped_area == arch_get_unmapped_area)
24628diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24629index 73ffd55..ad78676 100644
24630--- a/arch/x86/mm/init.c
24631+++ b/arch/x86/mm/init.c
24632@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24633 * cause a hotspot and fill up ZONE_DMA. The page tables
24634 * need roughly 0.5KB per GB.
24635 */
24636-#ifdef CONFIG_X86_32
24637- start = 0x7000;
24638-#else
24639- start = 0x8000;
24640-#endif
24641+ start = 0x100000;
24642 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24643 tables, PAGE_SIZE);
24644 if (e820_table_start == -1UL)
24645@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24646 #endif
24647
24648 set_nx();
24649- if (nx_enabled)
24650+ if (nx_enabled && cpu_has_nx)
24651 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24652
24653 /* Enable PSE if available */
24654@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24655 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24656 * mmio resources as well as potential bios/acpi data regions.
24657 */
24658+
24659 int devmem_is_allowed(unsigned long pagenr)
24660 {
24661+#ifdef CONFIG_GRKERNSEC_KMEM
24662+ /* allow BDA */
24663+ if (!pagenr)
24664+ return 1;
24665+ /* allow EBDA */
24666+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24667+ return 1;
24668+ /* allow ISA/video mem */
24669+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24670+ return 1;
24671+ /* throw out everything else below 1MB */
24672+ if (pagenr <= 256)
24673+ return 0;
24674+#else
24675 if (pagenr <= 256)
24676 return 1;
24677+#endif
24678+
24679 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24680 return 0;
24681 if (!page_is_ram(pagenr))
24682@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24683
24684 void free_initmem(void)
24685 {
24686+
24687+#ifdef CONFIG_PAX_KERNEXEC
24688+#ifdef CONFIG_X86_32
24689+ /* PaX: limit KERNEL_CS to actual size */
24690+ unsigned long addr, limit;
24691+ struct desc_struct d;
24692+ int cpu;
24693+
24694+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24695+ limit = (limit - 1UL) >> PAGE_SHIFT;
24696+
24697+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24698+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
24699+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24700+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24701+ }
24702+
24703+ /* PaX: make KERNEL_CS read-only */
24704+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24705+ if (!paravirt_enabled())
24706+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24707+/*
24708+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24709+ pgd = pgd_offset_k(addr);
24710+ pud = pud_offset(pgd, addr);
24711+ pmd = pmd_offset(pud, addr);
24712+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24713+ }
24714+*/
24715+#ifdef CONFIG_X86_PAE
24716+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24717+/*
24718+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24719+ pgd = pgd_offset_k(addr);
24720+ pud = pud_offset(pgd, addr);
24721+ pmd = pmd_offset(pud, addr);
24722+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24723+ }
24724+*/
24725+#endif
24726+
24727+#ifdef CONFIG_MODULES
24728+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24729+#endif
24730+
24731+#else
24732+ pgd_t *pgd;
24733+ pud_t *pud;
24734+ pmd_t *pmd;
24735+ unsigned long addr, end;
24736+
24737+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24738+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24739+ pgd = pgd_offset_k(addr);
24740+ pud = pud_offset(pgd, addr);
24741+ pmd = pmd_offset(pud, addr);
24742+ if (!pmd_present(*pmd))
24743+ continue;
24744+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24745+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24746+ else
24747+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24748+ }
24749+
24750+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24751+ end = addr + KERNEL_IMAGE_SIZE;
24752+ for (; addr < end; addr += PMD_SIZE) {
24753+ pgd = pgd_offset_k(addr);
24754+ pud = pud_offset(pgd, addr);
24755+ pmd = pmd_offset(pud, addr);
24756+ if (!pmd_present(*pmd))
24757+ continue;
24758+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24759+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24760+ }
24761+#endif
24762+
24763+ flush_tlb_all();
24764+#endif
24765+
24766 free_init_pages("unused kernel memory",
24767 (unsigned long)(&__init_begin),
24768 (unsigned long)(&__init_end));
24769diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24770index 30938c1..bda3d5d 100644
24771--- a/arch/x86/mm/init_32.c
24772+++ b/arch/x86/mm/init_32.c
24773@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24774 }
24775
24776 /*
24777- * Creates a middle page table and puts a pointer to it in the
24778- * given global directory entry. This only returns the gd entry
24779- * in non-PAE compilation mode, since the middle layer is folded.
24780- */
24781-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24782-{
24783- pud_t *pud;
24784- pmd_t *pmd_table;
24785-
24786-#ifdef CONFIG_X86_PAE
24787- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24788- if (after_bootmem)
24789- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24790- else
24791- pmd_table = (pmd_t *)alloc_low_page();
24792- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24793- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24794- pud = pud_offset(pgd, 0);
24795- BUG_ON(pmd_table != pmd_offset(pud, 0));
24796-
24797- return pmd_table;
24798- }
24799-#endif
24800- pud = pud_offset(pgd, 0);
24801- pmd_table = pmd_offset(pud, 0);
24802-
24803- return pmd_table;
24804-}
24805-
24806-/*
24807 * Create a page table and place a pointer to it in a middle page
24808 * directory entry:
24809 */
24810@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24811 page_table = (pte_t *)alloc_low_page();
24812
24813 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24814+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24815+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24816+#else
24817 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24818+#endif
24819 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24820 }
24821
24822 return pte_offset_kernel(pmd, 0);
24823 }
24824
24825+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24826+{
24827+ pud_t *pud;
24828+ pmd_t *pmd_table;
24829+
24830+ pud = pud_offset(pgd, 0);
24831+ pmd_table = pmd_offset(pud, 0);
24832+
24833+ return pmd_table;
24834+}
24835+
24836 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24837 {
24838 int pgd_idx = pgd_index(vaddr);
24839@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24840 int pgd_idx, pmd_idx;
24841 unsigned long vaddr;
24842 pgd_t *pgd;
24843+ pud_t *pud;
24844 pmd_t *pmd;
24845 pte_t *pte = NULL;
24846
24847@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24848 pgd = pgd_base + pgd_idx;
24849
24850 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24851- pmd = one_md_table_init(pgd);
24852- pmd = pmd + pmd_index(vaddr);
24853+ pud = pud_offset(pgd, vaddr);
24854+ pmd = pmd_offset(pud, vaddr);
24855+
24856+#ifdef CONFIG_X86_PAE
24857+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24858+#endif
24859+
24860 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24861 pmd++, pmd_idx++) {
24862 pte = page_table_kmap_check(one_page_table_init(pmd),
24863@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24864 }
24865 }
24866
24867-static inline int is_kernel_text(unsigned long addr)
24868+static inline int is_kernel_text(unsigned long start, unsigned long end)
24869 {
24870- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
24871- return 1;
24872- return 0;
24873+ if ((start > ktla_ktva((unsigned long)_etext) ||
24874+ end <= ktla_ktva((unsigned long)_stext)) &&
24875+ (start > ktla_ktva((unsigned long)_einittext) ||
24876+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24877+
24878+#ifdef CONFIG_ACPI_SLEEP
24879+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24880+#endif
24881+
24882+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24883+ return 0;
24884+ return 1;
24885 }
24886
24887 /*
24888@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
24889 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
24890 unsigned long start_pfn, end_pfn;
24891 pgd_t *pgd_base = swapper_pg_dir;
24892- int pgd_idx, pmd_idx, pte_ofs;
24893+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24894 unsigned long pfn;
24895 pgd_t *pgd;
24896+ pud_t *pud;
24897 pmd_t *pmd;
24898 pte_t *pte;
24899 unsigned pages_2m, pages_4k;
24900@@ -278,8 +279,13 @@ repeat:
24901 pfn = start_pfn;
24902 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24903 pgd = pgd_base + pgd_idx;
24904- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24905- pmd = one_md_table_init(pgd);
24906+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24907+ pud = pud_offset(pgd, 0);
24908+ pmd = pmd_offset(pud, 0);
24909+
24910+#ifdef CONFIG_X86_PAE
24911+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24912+#endif
24913
24914 if (pfn >= end_pfn)
24915 continue;
24916@@ -291,14 +297,13 @@ repeat:
24917 #endif
24918 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24919 pmd++, pmd_idx++) {
24920- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24921+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24922
24923 /*
24924 * Map with big pages if possible, otherwise
24925 * create normal page tables:
24926 */
24927 if (use_pse) {
24928- unsigned int addr2;
24929 pgprot_t prot = PAGE_KERNEL_LARGE;
24930 /*
24931 * first pass will use the same initial
24932@@ -308,11 +313,7 @@ repeat:
24933 __pgprot(PTE_IDENT_ATTR |
24934 _PAGE_PSE);
24935
24936- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24937- PAGE_OFFSET + PAGE_SIZE-1;
24938-
24939- if (is_kernel_text(addr) ||
24940- is_kernel_text(addr2))
24941+ if (is_kernel_text(address, address + PMD_SIZE))
24942 prot = PAGE_KERNEL_LARGE_EXEC;
24943
24944 pages_2m++;
24945@@ -329,7 +330,7 @@ repeat:
24946 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24947 pte += pte_ofs;
24948 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24949- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24950+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24951 pgprot_t prot = PAGE_KERNEL;
24952 /*
24953 * first pass will use the same initial
24954@@ -337,7 +338,7 @@ repeat:
24955 */
24956 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24957
24958- if (is_kernel_text(addr))
24959+ if (is_kernel_text(address, address + PAGE_SIZE))
24960 prot = PAGE_KERNEL_EXEC;
24961
24962 pages_4k++;
24963@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24964
24965 pud = pud_offset(pgd, va);
24966 pmd = pmd_offset(pud, va);
24967- if (!pmd_present(*pmd))
24968+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24969 break;
24970
24971 pte = pte_offset_kernel(pmd, va);
24972@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
24973
24974 static void __init pagetable_init(void)
24975 {
24976- pgd_t *pgd_base = swapper_pg_dir;
24977-
24978- permanent_kmaps_init(pgd_base);
24979+ permanent_kmaps_init(swapper_pg_dir);
24980 }
24981
24982 #ifdef CONFIG_ACPI_SLEEP
24983@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
24984 * ACPI suspend needs this for resume, because things like the intel-agp
24985 * driver might have split up a kernel 4MB mapping.
24986 */
24987-char swsusp_pg_dir[PAGE_SIZE]
24988+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
24989 __attribute__ ((aligned(PAGE_SIZE)));
24990
24991 static inline void save_pg_dir(void)
24992 {
24993- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
24994+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
24995 }
24996 #else /* !CONFIG_ACPI_SLEEP */
24997 static inline void save_pg_dir(void)
24998@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
24999 flush_tlb_all();
25000 }
25001
25002-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25003+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25004 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25005
25006 /* user-defined highmem size */
25007@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25008 * Initialize the boot-time allocator (with low memory only):
25009 */
25010 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25011- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25012+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25013 PAGE_SIZE);
25014 if (bootmap == -1L)
25015 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25016@@ -864,6 +863,12 @@ void __init mem_init(void)
25017
25018 pci_iommu_alloc();
25019
25020+#ifdef CONFIG_PAX_PER_CPU_PGD
25021+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25022+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25023+ KERNEL_PGD_PTRS);
25024+#endif
25025+
25026 #ifdef CONFIG_FLATMEM
25027 BUG_ON(!mem_map);
25028 #endif
25029@@ -881,7 +886,7 @@ void __init mem_init(void)
25030 set_highmem_pages_init();
25031
25032 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25033- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25034+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25035 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25036
25037 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25038@@ -923,10 +928,10 @@ void __init mem_init(void)
25039 ((unsigned long)&__init_end -
25040 (unsigned long)&__init_begin) >> 10,
25041
25042- (unsigned long)&_etext, (unsigned long)&_edata,
25043- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25044+ (unsigned long)&_sdata, (unsigned long)&_edata,
25045+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25046
25047- (unsigned long)&_text, (unsigned long)&_etext,
25048+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25049 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25050
25051 /*
25052@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25053 if (!kernel_set_to_readonly)
25054 return;
25055
25056+ start = ktla_ktva(start);
25057 pr_debug("Set kernel text: %lx - %lx for read write\n",
25058 start, start+size);
25059
25060@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25061 if (!kernel_set_to_readonly)
25062 return;
25063
25064+ start = ktla_ktva(start);
25065 pr_debug("Set kernel text: %lx - %lx for read only\n",
25066 start, start+size);
25067
25068@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25069 unsigned long start = PFN_ALIGN(_text);
25070 unsigned long size = PFN_ALIGN(_etext) - start;
25071
25072+ start = ktla_ktva(start);
25073 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25074 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25075 size >> 10);
25076diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25077index 7d095ad..25d2549 100644
25078--- a/arch/x86/mm/init_64.c
25079+++ b/arch/x86/mm/init_64.c
25080@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25081 pmd = fill_pmd(pud, vaddr);
25082 pte = fill_pte(pmd, vaddr);
25083
25084+ pax_open_kernel();
25085 set_pte(pte, new_pte);
25086+ pax_close_kernel();
25087
25088 /*
25089 * It's enough to flush this one mapping.
25090@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25091 pgd = pgd_offset_k((unsigned long)__va(phys));
25092 if (pgd_none(*pgd)) {
25093 pud = (pud_t *) spp_getpage();
25094- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25095- _PAGE_USER));
25096+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25097 }
25098 pud = pud_offset(pgd, (unsigned long)__va(phys));
25099 if (pud_none(*pud)) {
25100 pmd = (pmd_t *) spp_getpage();
25101- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25102- _PAGE_USER));
25103+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25104 }
25105 pmd = pmd_offset(pud, phys);
25106 BUG_ON(!pmd_none(*pmd));
25107@@ -675,6 +675,12 @@ void __init mem_init(void)
25108
25109 pci_iommu_alloc();
25110
25111+#ifdef CONFIG_PAX_PER_CPU_PGD
25112+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25113+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25114+ KERNEL_PGD_PTRS);
25115+#endif
25116+
25117 /* clear_bss() already clear the empty_zero_page */
25118
25119 reservedpages = 0;
25120@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25121 static struct vm_area_struct gate_vma = {
25122 .vm_start = VSYSCALL_START,
25123 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25124- .vm_page_prot = PAGE_READONLY_EXEC,
25125- .vm_flags = VM_READ | VM_EXEC
25126+ .vm_page_prot = PAGE_READONLY,
25127+ .vm_flags = VM_READ
25128 };
25129
25130 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25131@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25132
25133 const char *arch_vma_name(struct vm_area_struct *vma)
25134 {
25135- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25136+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25137 return "[vdso]";
25138 if (vma == &gate_vma)
25139 return "[vsyscall]";
25140diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25141index 84e236c..69bd3f6 100644
25142--- a/arch/x86/mm/iomap_32.c
25143+++ b/arch/x86/mm/iomap_32.c
25144@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25145 debug_kmap_atomic(type);
25146 idx = type + KM_TYPE_NR * smp_processor_id();
25147 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25148+
25149+ pax_open_kernel();
25150 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25151+ pax_close_kernel();
25152+
25153 arch_flush_lazy_mmu_mode();
25154
25155 return (void *)vaddr;
25156diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25157index 2feb9bd..3646202 100644
25158--- a/arch/x86/mm/ioremap.c
25159+++ b/arch/x86/mm/ioremap.c
25160@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25161 * Second special case: Some BIOSen report the PC BIOS
25162 * area (640->1Mb) as ram even though it is not.
25163 */
25164- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25165- pagenr < (BIOS_END >> PAGE_SHIFT))
25166+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25167+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25168 return 0;
25169
25170 for (i = 0; i < e820.nr_map; i++) {
25171@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25172 /*
25173 * Don't allow anybody to remap normal RAM that we're using..
25174 */
25175- for (pfn = phys_addr >> PAGE_SHIFT;
25176- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25177- pfn++) {
25178-
25179+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25180 int is_ram = page_is_ram(pfn);
25181
25182- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25183+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25184 return NULL;
25185 WARN_ON_ONCE(is_ram);
25186 }
25187@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_setup(char *str)
25188 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25189
25190 static __initdata int after_paging_init;
25191-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25192+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25193
25194 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25195 {
25196@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
25197 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25198
25199 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25200- memset(bm_pte, 0, sizeof(bm_pte));
25201- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25202+ pmd_populate_user(&init_mm, pmd, bm_pte);
25203
25204 /*
25205 * The boot-ioremap range spans multiple pmds, for which
25206diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25207index 8cc1833..1abbc5b 100644
25208--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25209+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25210@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25211 * memory (e.g. tracked pages)? For now, we need this to avoid
25212 * invoking kmemcheck for PnP BIOS calls.
25213 */
25214- if (regs->flags & X86_VM_MASK)
25215+ if (v8086_mode(regs))
25216 return false;
25217- if (regs->cs != __KERNEL_CS)
25218+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25219 return false;
25220
25221 pte = kmemcheck_pte_lookup(address);
25222diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25223index c8191de..2975082 100644
25224--- a/arch/x86/mm/mmap.c
25225+++ b/arch/x86/mm/mmap.c
25226@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25227 * Leave an at least ~128 MB hole with possible stack randomization.
25228 */
25229 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25230-#define MAX_GAP (TASK_SIZE/6*5)
25231+#define MAX_GAP (pax_task_size/6*5)
25232
25233 /*
25234 * True on X86_32 or when emulating IA32 on X86_64
25235@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25236 return rnd << PAGE_SHIFT;
25237 }
25238
25239-static unsigned long mmap_base(void)
25240+static unsigned long mmap_base(struct mm_struct *mm)
25241 {
25242 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25243+ unsigned long pax_task_size = TASK_SIZE;
25244+
25245+#ifdef CONFIG_PAX_SEGMEXEC
25246+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25247+ pax_task_size = SEGMEXEC_TASK_SIZE;
25248+#endif
25249
25250 if (gap < MIN_GAP)
25251 gap = MIN_GAP;
25252 else if (gap > MAX_GAP)
25253 gap = MAX_GAP;
25254
25255- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25256+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25257 }
25258
25259 /*
25260 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25261 * does, but not when emulating X86_32
25262 */
25263-static unsigned long mmap_legacy_base(void)
25264+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25265 {
25266- if (mmap_is_ia32())
25267+ if (mmap_is_ia32()) {
25268+
25269+#ifdef CONFIG_PAX_SEGMEXEC
25270+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25271+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25272+ else
25273+#endif
25274+
25275 return TASK_UNMAPPED_BASE;
25276- else
25277+ } else
25278 return TASK_UNMAPPED_BASE + mmap_rnd();
25279 }
25280
25281@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25282 void arch_pick_mmap_layout(struct mm_struct *mm)
25283 {
25284 if (mmap_is_legacy()) {
25285- mm->mmap_base = mmap_legacy_base();
25286+ mm->mmap_base = mmap_legacy_base(mm);
25287+
25288+#ifdef CONFIG_PAX_RANDMMAP
25289+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25290+ mm->mmap_base += mm->delta_mmap;
25291+#endif
25292+
25293 mm->get_unmapped_area = arch_get_unmapped_area;
25294 mm->unmap_area = arch_unmap_area;
25295 } else {
25296- mm->mmap_base = mmap_base();
25297+ mm->mmap_base = mmap_base(mm);
25298+
25299+#ifdef CONFIG_PAX_RANDMMAP
25300+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25301+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25302+#endif
25303+
25304 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25305 mm->unmap_area = arch_unmap_area_topdown;
25306 }
25307diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25308index 132772a..b961f11 100644
25309--- a/arch/x86/mm/mmio-mod.c
25310+++ b/arch/x86/mm/mmio-mod.c
25311@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25312 break;
25313 default:
25314 {
25315- unsigned char *ip = (unsigned char *)instptr;
25316+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25317 my_trace->opcode = MMIO_UNKNOWN_OP;
25318 my_trace->width = 0;
25319 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25320@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25321 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25322 void __iomem *addr)
25323 {
25324- static atomic_t next_id;
25325+ static atomic_unchecked_t next_id;
25326 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25327 /* These are page-unaligned. */
25328 struct mmiotrace_map map = {
25329@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25330 .private = trace
25331 },
25332 .phys = offset,
25333- .id = atomic_inc_return(&next_id)
25334+ .id = atomic_inc_return_unchecked(&next_id)
25335 };
25336 map.map_id = trace->id;
25337
25338diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25339index d253006..e56dd6a 100644
25340--- a/arch/x86/mm/numa_32.c
25341+++ b/arch/x86/mm/numa_32.c
25342@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25343 }
25344 #endif
25345
25346-extern unsigned long find_max_low_pfn(void);
25347 extern unsigned long highend_pfn, highstart_pfn;
25348
25349 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25350diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25351index e1d1069..2251ff3 100644
25352--- a/arch/x86/mm/pageattr-test.c
25353+++ b/arch/x86/mm/pageattr-test.c
25354@@ -36,7 +36,7 @@ enum {
25355
25356 static int pte_testbit(pte_t pte)
25357 {
25358- return pte_flags(pte) & _PAGE_UNUSED1;
25359+ return pte_flags(pte) & _PAGE_CPA_TEST;
25360 }
25361
25362 struct split_state {
25363diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25364index dd38bfb..8c12306 100644
25365--- a/arch/x86/mm/pageattr.c
25366+++ b/arch/x86/mm/pageattr.c
25367@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25368 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25369 */
25370 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25371- pgprot_val(forbidden) |= _PAGE_NX;
25372+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25373
25374 /*
25375 * The kernel text needs to be executable for obvious reasons
25376 * Does not cover __inittext since that is gone later on. On
25377 * 64bit we do not enforce !NX on the low mapping
25378 */
25379- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25380- pgprot_val(forbidden) |= _PAGE_NX;
25381+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25382+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25383
25384+#ifdef CONFIG_DEBUG_RODATA
25385 /*
25386 * The .rodata section needs to be read-only. Using the pfn
25387 * catches all aliases.
25388@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25389 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25390 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25391 pgprot_val(forbidden) |= _PAGE_RW;
25392+#endif
25393+
25394+#ifdef CONFIG_PAX_KERNEXEC
25395+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25396+ pgprot_val(forbidden) |= _PAGE_RW;
25397+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25398+ }
25399+#endif
25400
25401 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25402
25403@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25404 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25405 {
25406 /* change init_mm */
25407+ pax_open_kernel();
25408 set_pte_atomic(kpte, pte);
25409+
25410 #ifdef CONFIG_X86_32
25411 if (!SHARED_KERNEL_PMD) {
25412+
25413+#ifdef CONFIG_PAX_PER_CPU_PGD
25414+ unsigned long cpu;
25415+#else
25416 struct page *page;
25417+#endif
25418
25419+#ifdef CONFIG_PAX_PER_CPU_PGD
25420+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25421+ pgd_t *pgd = get_cpu_pgd(cpu);
25422+#else
25423 list_for_each_entry(page, &pgd_list, lru) {
25424- pgd_t *pgd;
25425+ pgd_t *pgd = (pgd_t *)page_address(page);
25426+#endif
25427+
25428 pud_t *pud;
25429 pmd_t *pmd;
25430
25431- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25432+ pgd += pgd_index(address);
25433 pud = pud_offset(pgd, address);
25434 pmd = pmd_offset(pud, address);
25435 set_pte_atomic((pte_t *)pmd, pte);
25436 }
25437 }
25438 #endif
25439+ pax_close_kernel();
25440 }
25441
25442 static int
25443diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25444index e78cd0e..de0a817 100644
25445--- a/arch/x86/mm/pat.c
25446+++ b/arch/x86/mm/pat.c
25447@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25448
25449 conflict:
25450 printk(KERN_INFO "%s:%d conflicting memory types "
25451- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25452+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25453 new->end, cattr_name(new->type), cattr_name(entry->type));
25454 return -EBUSY;
25455 }
25456@@ -559,7 +559,7 @@ unlock_ret:
25457
25458 if (err) {
25459 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25460- current->comm, current->pid, start, end);
25461+ current->comm, task_pid_nr(current), start, end);
25462 }
25463
25464 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25465@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25466 while (cursor < to) {
25467 if (!devmem_is_allowed(pfn)) {
25468 printk(KERN_INFO
25469- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25470- current->comm, from, to);
25471+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25472+ current->comm, from, to, cursor);
25473 return 0;
25474 }
25475 cursor += PAGE_SIZE;
25476@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25477 printk(KERN_INFO
25478 "%s:%d ioremap_change_attr failed %s "
25479 "for %Lx-%Lx\n",
25480- current->comm, current->pid,
25481+ current->comm, task_pid_nr(current),
25482 cattr_name(flags),
25483 base, (unsigned long long)(base + size));
25484 return -EINVAL;
25485@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25486 free_memtype(paddr, paddr + size);
25487 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25488 " for %Lx-%Lx, got %s\n",
25489- current->comm, current->pid,
25490+ current->comm, task_pid_nr(current),
25491 cattr_name(want_flags),
25492 (unsigned long long)paddr,
25493 (unsigned long long)(paddr + size),
25494diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25495index df3d5c8..c2223e1 100644
25496--- a/arch/x86/mm/pf_in.c
25497+++ b/arch/x86/mm/pf_in.c
25498@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25499 int i;
25500 enum reason_type rv = OTHERS;
25501
25502- p = (unsigned char *)ins_addr;
25503+ p = (unsigned char *)ktla_ktva(ins_addr);
25504 p += skip_prefix(p, &prf);
25505 p += get_opcode(p, &opcode);
25506
25507@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25508 struct prefix_bits prf;
25509 int i;
25510
25511- p = (unsigned char *)ins_addr;
25512+ p = (unsigned char *)ktla_ktva(ins_addr);
25513 p += skip_prefix(p, &prf);
25514 p += get_opcode(p, &opcode);
25515
25516@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25517 struct prefix_bits prf;
25518 int i;
25519
25520- p = (unsigned char *)ins_addr;
25521+ p = (unsigned char *)ktla_ktva(ins_addr);
25522 p += skip_prefix(p, &prf);
25523 p += get_opcode(p, &opcode);
25524
25525@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25526 int i;
25527 unsigned long rv;
25528
25529- p = (unsigned char *)ins_addr;
25530+ p = (unsigned char *)ktla_ktva(ins_addr);
25531 p += skip_prefix(p, &prf);
25532 p += get_opcode(p, &opcode);
25533 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25534@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25535 int i;
25536 unsigned long rv;
25537
25538- p = (unsigned char *)ins_addr;
25539+ p = (unsigned char *)ktla_ktva(ins_addr);
25540 p += skip_prefix(p, &prf);
25541 p += get_opcode(p, &opcode);
25542 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25543diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25544index e0e6fad..6b90017 100644
25545--- a/arch/x86/mm/pgtable.c
25546+++ b/arch/x86/mm/pgtable.c
25547@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25548 list_del(&page->lru);
25549 }
25550
25551-#define UNSHARED_PTRS_PER_PGD \
25552- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25553+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25554+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25555
25556+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25557+{
25558+ while (count--)
25559+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25560+}
25561+#endif
25562+
25563+#ifdef CONFIG_PAX_PER_CPU_PGD
25564+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25565+{
25566+ while (count--)
25567+
25568+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25569+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25570+#else
25571+ *dst++ = *src++;
25572+#endif
25573+
25574+}
25575+#endif
25576+
25577+#ifdef CONFIG_X86_64
25578+#define pxd_t pud_t
25579+#define pyd_t pgd_t
25580+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25581+#define pxd_free(mm, pud) pud_free((mm), (pud))
25582+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25583+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25584+#define PYD_SIZE PGDIR_SIZE
25585+#else
25586+#define pxd_t pmd_t
25587+#define pyd_t pud_t
25588+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25589+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25590+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25591+#define pyd_offset(mm ,address) pud_offset((mm), (address))
25592+#define PYD_SIZE PUD_SIZE
25593+#endif
25594+
25595+#ifdef CONFIG_PAX_PER_CPU_PGD
25596+static inline void pgd_ctor(pgd_t *pgd) {}
25597+static inline void pgd_dtor(pgd_t *pgd) {}
25598+#else
25599 static void pgd_ctor(pgd_t *pgd)
25600 {
25601 /* If the pgd points to a shared pagetable level (either the
25602@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25603 pgd_list_del(pgd);
25604 spin_unlock_irqrestore(&pgd_lock, flags);
25605 }
25606+#endif
25607
25608 /*
25609 * List of all pgd's needed for non-PAE so it can invalidate entries
25610@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25611 * -- wli
25612 */
25613
25614-#ifdef CONFIG_X86_PAE
25615+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25616 /*
25617 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25618 * updating the top-level pagetable entries to guarantee the
25619@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25620 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25621 * and initialize the kernel pmds here.
25622 */
25623-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25624+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25625
25626 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25627 {
25628@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25629 */
25630 flush_tlb_mm(mm);
25631 }
25632+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25633+#define PREALLOCATED_PXDS USER_PGD_PTRS
25634 #else /* !CONFIG_X86_PAE */
25635
25636 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25637-#define PREALLOCATED_PMDS 0
25638+#define PREALLOCATED_PXDS 0
25639
25640 #endif /* CONFIG_X86_PAE */
25641
25642-static void free_pmds(pmd_t *pmds[])
25643+static void free_pxds(pxd_t *pxds[])
25644 {
25645 int i;
25646
25647- for(i = 0; i < PREALLOCATED_PMDS; i++)
25648- if (pmds[i])
25649- free_page((unsigned long)pmds[i]);
25650+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25651+ if (pxds[i])
25652+ free_page((unsigned long)pxds[i]);
25653 }
25654
25655-static int preallocate_pmds(pmd_t *pmds[])
25656+static int preallocate_pxds(pxd_t *pxds[])
25657 {
25658 int i;
25659 bool failed = false;
25660
25661- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25662- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25663- if (pmd == NULL)
25664+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25665+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25666+ if (pxd == NULL)
25667 failed = true;
25668- pmds[i] = pmd;
25669+ pxds[i] = pxd;
25670 }
25671
25672 if (failed) {
25673- free_pmds(pmds);
25674+ free_pxds(pxds);
25675 return -ENOMEM;
25676 }
25677
25678@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25679 * preallocate which never got a corresponding vma will need to be
25680 * freed manually.
25681 */
25682-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25683+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25684 {
25685 int i;
25686
25687- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25688+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25689 pgd_t pgd = pgdp[i];
25690
25691 if (pgd_val(pgd) != 0) {
25692- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25693+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25694
25695- pgdp[i] = native_make_pgd(0);
25696+ set_pgd(pgdp + i, native_make_pgd(0));
25697
25698- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25699- pmd_free(mm, pmd);
25700+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25701+ pxd_free(mm, pxd);
25702 }
25703 }
25704 }
25705
25706-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25707+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25708 {
25709- pud_t *pud;
25710+ pyd_t *pyd;
25711 unsigned long addr;
25712 int i;
25713
25714- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25715+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25716 return;
25717
25718- pud = pud_offset(pgd, 0);
25719+#ifdef CONFIG_X86_64
25720+ pyd = pyd_offset(mm, 0L);
25721+#else
25722+ pyd = pyd_offset(pgd, 0L);
25723+#endif
25724
25725- for (addr = i = 0; i < PREALLOCATED_PMDS;
25726- i++, pud++, addr += PUD_SIZE) {
25727- pmd_t *pmd = pmds[i];
25728+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25729+ i++, pyd++, addr += PYD_SIZE) {
25730+ pxd_t *pxd = pxds[i];
25731
25732 if (i >= KERNEL_PGD_BOUNDARY)
25733- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25734- sizeof(pmd_t) * PTRS_PER_PMD);
25735+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25736+ sizeof(pxd_t) * PTRS_PER_PMD);
25737
25738- pud_populate(mm, pud, pmd);
25739+ pyd_populate(mm, pyd, pxd);
25740 }
25741 }
25742
25743 pgd_t *pgd_alloc(struct mm_struct *mm)
25744 {
25745 pgd_t *pgd;
25746- pmd_t *pmds[PREALLOCATED_PMDS];
25747+ pxd_t *pxds[PREALLOCATED_PXDS];
25748+
25749 unsigned long flags;
25750
25751 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25752@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25753
25754 mm->pgd = pgd;
25755
25756- if (preallocate_pmds(pmds) != 0)
25757+ if (preallocate_pxds(pxds) != 0)
25758 goto out_free_pgd;
25759
25760 if (paravirt_pgd_alloc(mm) != 0)
25761- goto out_free_pmds;
25762+ goto out_free_pxds;
25763
25764 /*
25765 * Make sure that pre-populating the pmds is atomic with
25766@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25767 spin_lock_irqsave(&pgd_lock, flags);
25768
25769 pgd_ctor(pgd);
25770- pgd_prepopulate_pmd(mm, pgd, pmds);
25771+ pgd_prepopulate_pxd(mm, pgd, pxds);
25772
25773 spin_unlock_irqrestore(&pgd_lock, flags);
25774
25775 return pgd;
25776
25777-out_free_pmds:
25778- free_pmds(pmds);
25779+out_free_pxds:
25780+ free_pxds(pxds);
25781 out_free_pgd:
25782 free_page((unsigned long)pgd);
25783 out:
25784@@ -287,7 +338,7 @@ out:
25785
25786 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25787 {
25788- pgd_mop_up_pmds(mm, pgd);
25789+ pgd_mop_up_pxds(mm, pgd);
25790 pgd_dtor(pgd);
25791 paravirt_pgd_free(mm, pgd);
25792 free_page((unsigned long)pgd);
25793diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25794index 46c8834..fcab43d 100644
25795--- a/arch/x86/mm/pgtable_32.c
25796+++ b/arch/x86/mm/pgtable_32.c
25797@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25798 return;
25799 }
25800 pte = pte_offset_kernel(pmd, vaddr);
25801+
25802+ pax_open_kernel();
25803 if (pte_val(pteval))
25804 set_pte_at(&init_mm, vaddr, pte, pteval);
25805 else
25806 pte_clear(&init_mm, vaddr, pte);
25807+ pax_close_kernel();
25808
25809 /*
25810 * It's enough to flush this one mapping.
25811diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25812index 513d8ed..978c161 100644
25813--- a/arch/x86/mm/setup_nx.c
25814+++ b/arch/x86/mm/setup_nx.c
25815@@ -4,11 +4,10 @@
25816
25817 #include <asm/pgtable.h>
25818
25819+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25820 int nx_enabled;
25821
25822-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25823-static int disable_nx __cpuinitdata;
25824-
25825+#ifndef CONFIG_PAX_PAGEEXEC
25826 /*
25827 * noexec = on|off
25828 *
25829@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
25830 if (!str)
25831 return -EINVAL;
25832 if (!strncmp(str, "on", 2)) {
25833- __supported_pte_mask |= _PAGE_NX;
25834- disable_nx = 0;
25835+ nx_enabled = 1;
25836 } else if (!strncmp(str, "off", 3)) {
25837- disable_nx = 1;
25838- __supported_pte_mask &= ~_PAGE_NX;
25839+ nx_enabled = 0;
25840 }
25841 return 0;
25842 }
25843 early_param("noexec", noexec_setup);
25844 #endif
25845+#endif
25846
25847 #ifdef CONFIG_X86_PAE
25848 void __init set_nx(void)
25849 {
25850- unsigned int v[4], l, h;
25851+ if (!nx_enabled && cpu_has_nx) {
25852+ unsigned l, h;
25853
25854- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
25855- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
25856-
25857- if ((v[3] & (1 << 20)) && !disable_nx) {
25858- rdmsr(MSR_EFER, l, h);
25859- l |= EFER_NX;
25860- wrmsr(MSR_EFER, l, h);
25861- nx_enabled = 1;
25862- __supported_pte_mask |= _PAGE_NX;
25863- }
25864+ __supported_pte_mask &= ~_PAGE_NX;
25865+ rdmsr(MSR_EFER, l, h);
25866+ l &= ~EFER_NX;
25867+ wrmsr(MSR_EFER, l, h);
25868 }
25869 }
25870 #else
25871@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
25872 unsigned long efer;
25873
25874 rdmsrl(MSR_EFER, efer);
25875- if (!(efer & EFER_NX) || disable_nx)
25876+ if (!(efer & EFER_NX) || !nx_enabled)
25877 __supported_pte_mask &= ~_PAGE_NX;
25878 }
25879 #endif
25880diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25881index 36fe08e..b123d3a 100644
25882--- a/arch/x86/mm/tlb.c
25883+++ b/arch/x86/mm/tlb.c
25884@@ -61,7 +61,11 @@ void leave_mm(int cpu)
25885 BUG();
25886 cpumask_clear_cpu(cpu,
25887 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25888+
25889+#ifndef CONFIG_PAX_PER_CPU_PGD
25890 load_cr3(swapper_pg_dir);
25891+#endif
25892+
25893 }
25894 EXPORT_SYMBOL_GPL(leave_mm);
25895
25896diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25897index 044897b..a195924 100644
25898--- a/arch/x86/oprofile/backtrace.c
25899+++ b/arch/x86/oprofile/backtrace.c
25900@@ -57,7 +57,7 @@ static struct frame_head *dump_user_backtrace(struct frame_head *head)
25901 struct frame_head bufhead[2];
25902
25903 /* Also check accessibility of one struct frame_head beyond */
25904- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
25905+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
25906 return NULL;
25907 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
25908 return NULL;
25909@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25910 {
25911 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
25912
25913- if (!user_mode_vm(regs)) {
25914+ if (!user_mode(regs)) {
25915 unsigned long stack = kernel_stack_pointer(regs);
25916 if (depth)
25917 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25918diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
25919index e6a160a..36deff6 100644
25920--- a/arch/x86/oprofile/op_model_p4.c
25921+++ b/arch/x86/oprofile/op_model_p4.c
25922@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
25923 #endif
25924 }
25925
25926-static int inline addr_increment(void)
25927+static inline int addr_increment(void)
25928 {
25929 #ifdef CONFIG_SMP
25930 return smp_num_siblings == 2 ? 2 : 1;
25931diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
25932index 1331fcf..03901b2 100644
25933--- a/arch/x86/pci/common.c
25934+++ b/arch/x86/pci/common.c
25935@@ -31,8 +31,8 @@ int noioapicreroute = 1;
25936 int pcibios_last_bus = -1;
25937 unsigned long pirq_table_addr;
25938 struct pci_bus *pci_root_bus;
25939-struct pci_raw_ops *raw_pci_ops;
25940-struct pci_raw_ops *raw_pci_ext_ops;
25941+const struct pci_raw_ops *raw_pci_ops;
25942+const struct pci_raw_ops *raw_pci_ext_ops;
25943
25944 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
25945 int reg, int len, u32 *val)
25946diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
25947index 347d882..4baf6b6 100644
25948--- a/arch/x86/pci/direct.c
25949+++ b/arch/x86/pci/direct.c
25950@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
25951
25952 #undef PCI_CONF1_ADDRESS
25953
25954-struct pci_raw_ops pci_direct_conf1 = {
25955+const struct pci_raw_ops pci_direct_conf1 = {
25956 .read = pci_conf1_read,
25957 .write = pci_conf1_write,
25958 };
25959@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
25960
25961 #undef PCI_CONF2_ADDRESS
25962
25963-struct pci_raw_ops pci_direct_conf2 = {
25964+const struct pci_raw_ops pci_direct_conf2 = {
25965 .read = pci_conf2_read,
25966 .write = pci_conf2_write,
25967 };
25968@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
25969 * This should be close to trivial, but it isn't, because there are buggy
25970 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
25971 */
25972-static int __init pci_sanity_check(struct pci_raw_ops *o)
25973+static int __init pci_sanity_check(const struct pci_raw_ops *o)
25974 {
25975 u32 x = 0;
25976 int year, devfn;
25977diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
25978index f10a7e9..0425342 100644
25979--- a/arch/x86/pci/mmconfig_32.c
25980+++ b/arch/x86/pci/mmconfig_32.c
25981@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
25982 return 0;
25983 }
25984
25985-static struct pci_raw_ops pci_mmcfg = {
25986+static const struct pci_raw_ops pci_mmcfg = {
25987 .read = pci_mmcfg_read,
25988 .write = pci_mmcfg_write,
25989 };
25990diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
25991index 94349f8..41600a7 100644
25992--- a/arch/x86/pci/mmconfig_64.c
25993+++ b/arch/x86/pci/mmconfig_64.c
25994@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
25995 return 0;
25996 }
25997
25998-static struct pci_raw_ops pci_mmcfg = {
25999+static const struct pci_raw_ops pci_mmcfg = {
26000 .read = pci_mmcfg_read,
26001 .write = pci_mmcfg_write,
26002 };
26003diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26004index 8eb295e..86bd657 100644
26005--- a/arch/x86/pci/numaq_32.c
26006+++ b/arch/x86/pci/numaq_32.c
26007@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26008
26009 #undef PCI_CONF1_MQ_ADDRESS
26010
26011-static struct pci_raw_ops pci_direct_conf1_mq = {
26012+static const struct pci_raw_ops pci_direct_conf1_mq = {
26013 .read = pci_conf1_mq_read,
26014 .write = pci_conf1_mq_write
26015 };
26016diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26017index b889d82..5a58a0a 100644
26018--- a/arch/x86/pci/olpc.c
26019+++ b/arch/x86/pci/olpc.c
26020@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26021 return 0;
26022 }
26023
26024-static struct pci_raw_ops pci_olpc_conf = {
26025+static const struct pci_raw_ops pci_olpc_conf = {
26026 .read = pci_olpc_read,
26027 .write = pci_olpc_write,
26028 };
26029diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26030index 1c975cc..ffd0536 100644
26031--- a/arch/x86/pci/pcbios.c
26032+++ b/arch/x86/pci/pcbios.c
26033@@ -56,50 +56,93 @@ union bios32 {
26034 static struct {
26035 unsigned long address;
26036 unsigned short segment;
26037-} bios32_indirect = { 0, __KERNEL_CS };
26038+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26039
26040 /*
26041 * Returns the entry point for the given service, NULL on error
26042 */
26043
26044-static unsigned long bios32_service(unsigned long service)
26045+static unsigned long __devinit bios32_service(unsigned long service)
26046 {
26047 unsigned char return_code; /* %al */
26048 unsigned long address; /* %ebx */
26049 unsigned long length; /* %ecx */
26050 unsigned long entry; /* %edx */
26051 unsigned long flags;
26052+ struct desc_struct d, *gdt;
26053
26054 local_irq_save(flags);
26055- __asm__("lcall *(%%edi); cld"
26056+
26057+ gdt = get_cpu_gdt_table(smp_processor_id());
26058+
26059+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26060+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26061+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26062+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26063+
26064+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26065 : "=a" (return_code),
26066 "=b" (address),
26067 "=c" (length),
26068 "=d" (entry)
26069 : "0" (service),
26070 "1" (0),
26071- "D" (&bios32_indirect));
26072+ "D" (&bios32_indirect),
26073+ "r"(__PCIBIOS_DS)
26074+ : "memory");
26075+
26076+ pax_open_kernel();
26077+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26078+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26079+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26080+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26081+ pax_close_kernel();
26082+
26083 local_irq_restore(flags);
26084
26085 switch (return_code) {
26086- case 0:
26087- return address + entry;
26088- case 0x80: /* Not present */
26089- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26090- return 0;
26091- default: /* Shouldn't happen */
26092- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26093- service, return_code);
26094+ case 0: {
26095+ int cpu;
26096+ unsigned char flags;
26097+
26098+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26099+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26100+ printk(KERN_WARNING "bios32_service: not valid\n");
26101 return 0;
26102+ }
26103+ address = address + PAGE_OFFSET;
26104+ length += 16UL; /* some BIOSs underreport this... */
26105+ flags = 4;
26106+ if (length >= 64*1024*1024) {
26107+ length >>= PAGE_SHIFT;
26108+ flags |= 8;
26109+ }
26110+
26111+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
26112+ gdt = get_cpu_gdt_table(cpu);
26113+ pack_descriptor(&d, address, length, 0x9b, flags);
26114+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26115+ pack_descriptor(&d, address, length, 0x93, flags);
26116+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26117+ }
26118+ return entry;
26119+ }
26120+ case 0x80: /* Not present */
26121+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26122+ return 0;
26123+ default: /* Shouldn't happen */
26124+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26125+ service, return_code);
26126+ return 0;
26127 }
26128 }
26129
26130 static struct {
26131 unsigned long address;
26132 unsigned short segment;
26133-} pci_indirect = { 0, __KERNEL_CS };
26134+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26135
26136-static int pci_bios_present;
26137+static int pci_bios_present __read_only;
26138
26139 static int __devinit check_pcibios(void)
26140 {
26141@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26142 unsigned long flags, pcibios_entry;
26143
26144 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26145- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26146+ pci_indirect.address = pcibios_entry;
26147
26148 local_irq_save(flags);
26149- __asm__(
26150- "lcall *(%%edi); cld\n\t"
26151+ __asm__("movw %w6, %%ds\n\t"
26152+ "lcall *%%ss:(%%edi); cld\n\t"
26153+ "push %%ss\n\t"
26154+ "pop %%ds\n\t"
26155 "jc 1f\n\t"
26156 "xor %%ah, %%ah\n"
26157 "1:"
26158@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26159 "=b" (ebx),
26160 "=c" (ecx)
26161 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26162- "D" (&pci_indirect)
26163+ "D" (&pci_indirect),
26164+ "r" (__PCIBIOS_DS)
26165 : "memory");
26166 local_irq_restore(flags);
26167
26168@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26169
26170 switch (len) {
26171 case 1:
26172- __asm__("lcall *(%%esi); cld\n\t"
26173+ __asm__("movw %w6, %%ds\n\t"
26174+ "lcall *%%ss:(%%esi); cld\n\t"
26175+ "push %%ss\n\t"
26176+ "pop %%ds\n\t"
26177 "jc 1f\n\t"
26178 "xor %%ah, %%ah\n"
26179 "1:"
26180@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26181 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26182 "b" (bx),
26183 "D" ((long)reg),
26184- "S" (&pci_indirect));
26185+ "S" (&pci_indirect),
26186+ "r" (__PCIBIOS_DS));
26187 /*
26188 * Zero-extend the result beyond 8 bits, do not trust the
26189 * BIOS having done it:
26190@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26191 *value &= 0xff;
26192 break;
26193 case 2:
26194- __asm__("lcall *(%%esi); cld\n\t"
26195+ __asm__("movw %w6, %%ds\n\t"
26196+ "lcall *%%ss:(%%esi); cld\n\t"
26197+ "push %%ss\n\t"
26198+ "pop %%ds\n\t"
26199 "jc 1f\n\t"
26200 "xor %%ah, %%ah\n"
26201 "1:"
26202@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26203 : "1" (PCIBIOS_READ_CONFIG_WORD),
26204 "b" (bx),
26205 "D" ((long)reg),
26206- "S" (&pci_indirect));
26207+ "S" (&pci_indirect),
26208+ "r" (__PCIBIOS_DS));
26209 /*
26210 * Zero-extend the result beyond 16 bits, do not trust the
26211 * BIOS having done it:
26212@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26213 *value &= 0xffff;
26214 break;
26215 case 4:
26216- __asm__("lcall *(%%esi); cld\n\t"
26217+ __asm__("movw %w6, %%ds\n\t"
26218+ "lcall *%%ss:(%%esi); cld\n\t"
26219+ "push %%ss\n\t"
26220+ "pop %%ds\n\t"
26221 "jc 1f\n\t"
26222 "xor %%ah, %%ah\n"
26223 "1:"
26224@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26225 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26226 "b" (bx),
26227 "D" ((long)reg),
26228- "S" (&pci_indirect));
26229+ "S" (&pci_indirect),
26230+ "r" (__PCIBIOS_DS));
26231 break;
26232 }
26233
26234@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26235
26236 switch (len) {
26237 case 1:
26238- __asm__("lcall *(%%esi); cld\n\t"
26239+ __asm__("movw %w6, %%ds\n\t"
26240+ "lcall *%%ss:(%%esi); cld\n\t"
26241+ "push %%ss\n\t"
26242+ "pop %%ds\n\t"
26243 "jc 1f\n\t"
26244 "xor %%ah, %%ah\n"
26245 "1:"
26246@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26247 "c" (value),
26248 "b" (bx),
26249 "D" ((long)reg),
26250- "S" (&pci_indirect));
26251+ "S" (&pci_indirect),
26252+ "r" (__PCIBIOS_DS));
26253 break;
26254 case 2:
26255- __asm__("lcall *(%%esi); cld\n\t"
26256+ __asm__("movw %w6, %%ds\n\t"
26257+ "lcall *%%ss:(%%esi); cld\n\t"
26258+ "push %%ss\n\t"
26259+ "pop %%ds\n\t"
26260 "jc 1f\n\t"
26261 "xor %%ah, %%ah\n"
26262 "1:"
26263@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26264 "c" (value),
26265 "b" (bx),
26266 "D" ((long)reg),
26267- "S" (&pci_indirect));
26268+ "S" (&pci_indirect),
26269+ "r" (__PCIBIOS_DS));
26270 break;
26271 case 4:
26272- __asm__("lcall *(%%esi); cld\n\t"
26273+ __asm__("movw %w6, %%ds\n\t"
26274+ "lcall *%%ss:(%%esi); cld\n\t"
26275+ "push %%ss\n\t"
26276+ "pop %%ds\n\t"
26277 "jc 1f\n\t"
26278 "xor %%ah, %%ah\n"
26279 "1:"
26280@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26281 "c" (value),
26282 "b" (bx),
26283 "D" ((long)reg),
26284- "S" (&pci_indirect));
26285+ "S" (&pci_indirect),
26286+ "r" (__PCIBIOS_DS));
26287 break;
26288 }
26289
26290@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26291 * Function table for BIOS32 access
26292 */
26293
26294-static struct pci_raw_ops pci_bios_access = {
26295+static const struct pci_raw_ops pci_bios_access = {
26296 .read = pci_bios_read,
26297 .write = pci_bios_write
26298 };
26299@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26300 * Try to find PCI BIOS.
26301 */
26302
26303-static struct pci_raw_ops * __devinit pci_find_bios(void)
26304+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26305 {
26306 union bios32 *check;
26307 unsigned char sum;
26308@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26309
26310 DBG("PCI: Fetching IRQ routing table... ");
26311 __asm__("push %%es\n\t"
26312+ "movw %w8, %%ds\n\t"
26313 "push %%ds\n\t"
26314 "pop %%es\n\t"
26315- "lcall *(%%esi); cld\n\t"
26316+ "lcall *%%ss:(%%esi); cld\n\t"
26317 "pop %%es\n\t"
26318+ "push %%ss\n\t"
26319+ "pop %%ds\n"
26320 "jc 1f\n\t"
26321 "xor %%ah, %%ah\n"
26322 "1:"
26323@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26324 "1" (0),
26325 "D" ((long) &opt),
26326 "S" (&pci_indirect),
26327- "m" (opt)
26328+ "m" (opt),
26329+ "r" (__PCIBIOS_DS)
26330 : "memory");
26331 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26332 if (ret & 0xff00)
26333@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26334 {
26335 int ret;
26336
26337- __asm__("lcall *(%%esi); cld\n\t"
26338+ __asm__("movw %w5, %%ds\n\t"
26339+ "lcall *%%ss:(%%esi); cld\n\t"
26340+ "push %%ss\n\t"
26341+ "pop %%ds\n"
26342 "jc 1f\n\t"
26343 "xor %%ah, %%ah\n"
26344 "1:"
26345@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26346 : "0" (PCIBIOS_SET_PCI_HW_INT),
26347 "b" ((dev->bus->number << 8) | dev->devfn),
26348 "c" ((irq << 8) | (pin + 10)),
26349- "S" (&pci_indirect));
26350+ "S" (&pci_indirect),
26351+ "r" (__PCIBIOS_DS));
26352 return !(ret & 0xff00);
26353 }
26354 EXPORT_SYMBOL(pcibios_set_irq_routing);
26355diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26356index fa0f651..9d8f3d9 100644
26357--- a/arch/x86/power/cpu.c
26358+++ b/arch/x86/power/cpu.c
26359@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26360 static void fix_processor_context(void)
26361 {
26362 int cpu = smp_processor_id();
26363- struct tss_struct *t = &per_cpu(init_tss, cpu);
26364+ struct tss_struct *t = init_tss + cpu;
26365
26366 set_tss_desc(cpu, t); /*
26367 * This just modifies memory; should not be
26368@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26369 */
26370
26371 #ifdef CONFIG_X86_64
26372+ pax_open_kernel();
26373 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26374+ pax_close_kernel();
26375
26376 syscall_init(); /* This sets MSR_*STAR and related */
26377 #endif
26378diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26379index dd78ef6..f9d928d 100644
26380--- a/arch/x86/vdso/Makefile
26381+++ b/arch/x86/vdso/Makefile
26382@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26383 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26384 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26385
26386-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26387+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26388 GCOV_PROFILE := n
26389
26390 #
26391diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26392index ee55754..0013b2e 100644
26393--- a/arch/x86/vdso/vclock_gettime.c
26394+++ b/arch/x86/vdso/vclock_gettime.c
26395@@ -22,24 +22,48 @@
26396 #include <asm/hpet.h>
26397 #include <asm/unistd.h>
26398 #include <asm/io.h>
26399+#include <asm/fixmap.h>
26400 #include "vextern.h"
26401
26402 #define gtod vdso_vsyscall_gtod_data
26403
26404+notrace noinline long __vdso_fallback_time(long *t)
26405+{
26406+ long secs;
26407+ asm volatile("syscall"
26408+ : "=a" (secs)
26409+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26410+ return secs;
26411+}
26412+
26413 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26414 {
26415 long ret;
26416 asm("syscall" : "=a" (ret) :
26417- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26418+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26419 return ret;
26420 }
26421
26422+notrace static inline cycle_t __vdso_vread_hpet(void)
26423+{
26424+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26425+}
26426+
26427+notrace static inline cycle_t __vdso_vread_tsc(void)
26428+{
26429+ cycle_t ret = (cycle_t)vget_cycles();
26430+
26431+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26432+}
26433+
26434 notrace static inline long vgetns(void)
26435 {
26436 long v;
26437- cycles_t (*vread)(void);
26438- vread = gtod->clock.vread;
26439- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26440+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26441+ v = __vdso_vread_tsc();
26442+ else
26443+ v = __vdso_vread_hpet();
26444+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26445 return (v * gtod->clock.mult) >> gtod->clock.shift;
26446 }
26447
26448@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26449
26450 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26451 {
26452- if (likely(gtod->sysctl_enabled))
26453+ if (likely(gtod->sysctl_enabled &&
26454+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26455+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26456 switch (clock) {
26457 case CLOCK_REALTIME:
26458 if (likely(gtod->clock.vread))
26459@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26460 int clock_gettime(clockid_t, struct timespec *)
26461 __attribute__((weak, alias("__vdso_clock_gettime")));
26462
26463-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26464+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26465 {
26466 long ret;
26467- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26468+ asm("syscall" : "=a" (ret) :
26469+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26470+ return ret;
26471+}
26472+
26473+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26474+{
26475+ if (likely(gtod->sysctl_enabled &&
26476+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26477+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26478+ {
26479 if (likely(tv != NULL)) {
26480 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26481 offsetof(struct timespec, tv_nsec) ||
26482@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26483 }
26484 return 0;
26485 }
26486- asm("syscall" : "=a" (ret) :
26487- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26488- return ret;
26489+ return __vdso_fallback_gettimeofday(tv, tz);
26490 }
26491 int gettimeofday(struct timeval *, struct timezone *)
26492 __attribute__((weak, alias("__vdso_gettimeofday")));
26493diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26494index 4e5dd3b..00ba15e 100644
26495--- a/arch/x86/vdso/vdso.lds.S
26496+++ b/arch/x86/vdso/vdso.lds.S
26497@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26498 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26499 #include "vextern.h"
26500 #undef VEXTERN
26501+
26502+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26503+VEXTERN(fallback_gettimeofday)
26504+VEXTERN(fallback_time)
26505+VEXTERN(getcpu)
26506+#undef VEXTERN
26507diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26508index 58bc00f..d53fb48 100644
26509--- a/arch/x86/vdso/vdso32-setup.c
26510+++ b/arch/x86/vdso/vdso32-setup.c
26511@@ -25,6 +25,7 @@
26512 #include <asm/tlbflush.h>
26513 #include <asm/vdso.h>
26514 #include <asm/proto.h>
26515+#include <asm/mman.h>
26516
26517 enum {
26518 VDSO_DISABLED = 0,
26519@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26520 void enable_sep_cpu(void)
26521 {
26522 int cpu = get_cpu();
26523- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26524+ struct tss_struct *tss = init_tss + cpu;
26525
26526 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26527 put_cpu();
26528@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26529 gate_vma.vm_start = FIXADDR_USER_START;
26530 gate_vma.vm_end = FIXADDR_USER_END;
26531 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26532- gate_vma.vm_page_prot = __P101;
26533+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26534 /*
26535 * Make sure the vDSO gets into every core dump.
26536 * Dumping its contents makes post-mortem fully interpretable later
26537@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26538 if (compat)
26539 addr = VDSO_HIGH_BASE;
26540 else {
26541- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26542+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26543 if (IS_ERR_VALUE(addr)) {
26544 ret = addr;
26545 goto up_fail;
26546 }
26547 }
26548
26549- current->mm->context.vdso = (void *)addr;
26550+ current->mm->context.vdso = addr;
26551
26552 if (compat_uses_vma || !compat) {
26553 /*
26554@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26555 }
26556
26557 current_thread_info()->sysenter_return =
26558- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26559+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26560
26561 up_fail:
26562 if (ret)
26563- current->mm->context.vdso = NULL;
26564+ current->mm->context.vdso = 0;
26565
26566 up_write(&mm->mmap_sem);
26567
26568@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26569
26570 const char *arch_vma_name(struct vm_area_struct *vma)
26571 {
26572- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26573+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26574 return "[vdso]";
26575+
26576+#ifdef CONFIG_PAX_SEGMEXEC
26577+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26578+ return "[vdso]";
26579+#endif
26580+
26581 return NULL;
26582 }
26583
26584@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26585 struct mm_struct *mm = tsk->mm;
26586
26587 /* Check to see if this task was created in compat vdso mode */
26588- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26589+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26590 return &gate_vma;
26591 return NULL;
26592 }
26593diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26594index 1683ba2..48d07f3 100644
26595--- a/arch/x86/vdso/vextern.h
26596+++ b/arch/x86/vdso/vextern.h
26597@@ -11,6 +11,5 @@
26598 put into vextern.h and be referenced as a pointer with vdso prefix.
26599 The main kernel later fills in the values. */
26600
26601-VEXTERN(jiffies)
26602 VEXTERN(vgetcpu_mode)
26603 VEXTERN(vsyscall_gtod_data)
26604diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26605index 21e1aeb..2c0b3c4 100644
26606--- a/arch/x86/vdso/vma.c
26607+++ b/arch/x86/vdso/vma.c
26608@@ -17,8 +17,6 @@
26609 #include "vextern.h" /* Just for VMAGIC. */
26610 #undef VEXTERN
26611
26612-unsigned int __read_mostly vdso_enabled = 1;
26613-
26614 extern char vdso_start[], vdso_end[];
26615 extern unsigned short vdso_sync_cpuid;
26616
26617@@ -27,10 +25,8 @@ static unsigned vdso_size;
26618
26619 static inline void *var_ref(void *p, char *name)
26620 {
26621- if (*(void **)p != (void *)VMAGIC) {
26622- printk("VDSO: variable %s broken\n", name);
26623- vdso_enabled = 0;
26624- }
26625+ if (*(void **)p != (void *)VMAGIC)
26626+ panic("VDSO: variable %s broken\n", name);
26627 return p;
26628 }
26629
26630@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26631 if (!vbase)
26632 goto oom;
26633
26634- if (memcmp(vbase, "\177ELF", 4)) {
26635- printk("VDSO: I'm broken; not ELF\n");
26636- vdso_enabled = 0;
26637- }
26638+ if (memcmp(vbase, ELFMAG, SELFMAG))
26639+ panic("VDSO: I'm broken; not ELF\n");
26640
26641 #define VEXTERN(x) \
26642 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26643 #include "vextern.h"
26644 #undef VEXTERN
26645+ vunmap(vbase);
26646 return 0;
26647
26648 oom:
26649- printk("Cannot allocate vdso\n");
26650- vdso_enabled = 0;
26651- return -ENOMEM;
26652+ panic("Cannot allocate vdso\n");
26653 }
26654 __initcall(init_vdso_vars);
26655
26656@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26657 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26658 {
26659 struct mm_struct *mm = current->mm;
26660- unsigned long addr;
26661+ unsigned long addr = 0;
26662 int ret;
26663
26664- if (!vdso_enabled)
26665- return 0;
26666-
26667 down_write(&mm->mmap_sem);
26668+
26669+#ifdef CONFIG_PAX_RANDMMAP
26670+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26671+#endif
26672+
26673 addr = vdso_addr(mm->start_stack, vdso_size);
26674 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26675 if (IS_ERR_VALUE(addr)) {
26676@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26677 goto up_fail;
26678 }
26679
26680- current->mm->context.vdso = (void *)addr;
26681+ current->mm->context.vdso = addr;
26682
26683 ret = install_special_mapping(mm, addr, vdso_size,
26684 VM_READ|VM_EXEC|
26685@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26686 VM_ALWAYSDUMP,
26687 vdso_pages);
26688 if (ret) {
26689- current->mm->context.vdso = NULL;
26690+ current->mm->context.vdso = 0;
26691 goto up_fail;
26692 }
26693
26694@@ -132,10 +127,3 @@ up_fail:
26695 up_write(&mm->mmap_sem);
26696 return ret;
26697 }
26698-
26699-static __init int vdso_setup(char *s)
26700-{
26701- vdso_enabled = simple_strtoul(s, NULL, 0);
26702- return 0;
26703-}
26704-__setup("vdso=", vdso_setup);
26705diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26706index 0087b00..eecb34f 100644
26707--- a/arch/x86/xen/enlighten.c
26708+++ b/arch/x86/xen/enlighten.c
26709@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26710
26711 struct shared_info xen_dummy_shared_info;
26712
26713-void *xen_initial_gdt;
26714-
26715 /*
26716 * Point at some empty memory to start with. We map the real shared_info
26717 * page as soon as fixmap is up and running.
26718@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26719
26720 preempt_disable();
26721
26722- start = __get_cpu_var(idt_desc).address;
26723+ start = (unsigned long)__get_cpu_var(idt_desc).address;
26724 end = start + __get_cpu_var(idt_desc).size + 1;
26725
26726 xen_mc_flush();
26727@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26728 #endif
26729 };
26730
26731-static void xen_reboot(int reason)
26732+static __noreturn void xen_reboot(int reason)
26733 {
26734 struct sched_shutdown r = { .reason = reason };
26735
26736@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26737 BUG();
26738 }
26739
26740-static void xen_restart(char *msg)
26741+static __noreturn void xen_restart(char *msg)
26742 {
26743 xen_reboot(SHUTDOWN_reboot);
26744 }
26745
26746-static void xen_emergency_restart(void)
26747+static __noreturn void xen_emergency_restart(void)
26748 {
26749 xen_reboot(SHUTDOWN_reboot);
26750 }
26751
26752-static void xen_machine_halt(void)
26753+static __noreturn void xen_machine_halt(void)
26754 {
26755 xen_reboot(SHUTDOWN_poweroff);
26756 }
26757@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26758 */
26759 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26760
26761-#ifdef CONFIG_X86_64
26762 /* Work out if we support NX */
26763- check_efer();
26764+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26765+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26766+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26767+ unsigned l, h;
26768+
26769+#ifdef CONFIG_X86_PAE
26770+ nx_enabled = 1;
26771+#endif
26772+ __supported_pte_mask |= _PAGE_NX;
26773+ rdmsr(MSR_EFER, l, h);
26774+ l |= EFER_NX;
26775+ wrmsr(MSR_EFER, l, h);
26776+ }
26777 #endif
26778
26779 xen_setup_features();
26780@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26781
26782 machine_ops = xen_machine_ops;
26783
26784- /*
26785- * The only reliable way to retain the initial address of the
26786- * percpu gdt_page is to remember it here, so we can go and
26787- * mark it RW later, when the initial percpu area is freed.
26788- */
26789- xen_initial_gdt = &per_cpu(gdt_page, 0);
26790-
26791 xen_smp_init();
26792
26793 pgd = (pgd_t *)xen_start_info->pt_base;
26794diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26795index 3f90a2c..ee0d992 100644
26796--- a/arch/x86/xen/mmu.c
26797+++ b/arch/x86/xen/mmu.c
26798@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26799 convert_pfn_mfn(init_level4_pgt);
26800 convert_pfn_mfn(level3_ident_pgt);
26801 convert_pfn_mfn(level3_kernel_pgt);
26802+ convert_pfn_mfn(level3_vmalloc_pgt);
26803+ convert_pfn_mfn(level3_vmemmap_pgt);
26804
26805 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26806 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26807@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26808 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26809 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26810 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26811+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
26812+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26813 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26814+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26815 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26816 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26817
26818@@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_init(void)
26819 pv_mmu_ops.set_pud = xen_set_pud;
26820 #if PAGETABLE_LEVELS == 4
26821 pv_mmu_ops.set_pgd = xen_set_pgd;
26822+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26823 #endif
26824
26825 /* This will work as long as patching hasn't happened yet
26826@@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
26827 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26828 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26829 .set_pgd = xen_set_pgd_hyper,
26830+ .set_pgd_batched = xen_set_pgd_hyper,
26831
26832 .alloc_pud = xen_alloc_pmd_init,
26833 .release_pud = xen_release_pmd_init,
26834diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26835index a96204a..fca9b8e 100644
26836--- a/arch/x86/xen/smp.c
26837+++ b/arch/x86/xen/smp.c
26838@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26839 {
26840 BUG_ON(smp_processor_id() != 0);
26841 native_smp_prepare_boot_cpu();
26842-
26843- /* We've switched to the "real" per-cpu gdt, so make sure the
26844- old memory can be recycled */
26845- make_lowmem_page_readwrite(xen_initial_gdt);
26846-
26847 xen_setup_vcpu_info_placement();
26848 }
26849
26850@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26851 gdt = get_cpu_gdt_table(cpu);
26852
26853 ctxt->flags = VGCF_IN_KERNEL;
26854- ctxt->user_regs.ds = __USER_DS;
26855- ctxt->user_regs.es = __USER_DS;
26856+ ctxt->user_regs.ds = __KERNEL_DS;
26857+ ctxt->user_regs.es = __KERNEL_DS;
26858 ctxt->user_regs.ss = __KERNEL_DS;
26859 #ifdef CONFIG_X86_32
26860 ctxt->user_regs.fs = __KERNEL_PERCPU;
26861- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26862+ savesegment(gs, ctxt->user_regs.gs);
26863 #else
26864 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26865 #endif
26866@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26867 int rc;
26868
26869 per_cpu(current_task, cpu) = idle;
26870+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26871 #ifdef CONFIG_X86_32
26872 irq_ctx_init(cpu);
26873 #else
26874 clear_tsk_thread_flag(idle, TIF_FORK);
26875- per_cpu(kernel_stack, cpu) =
26876- (unsigned long)task_stack_page(idle) -
26877- KERNEL_STACK_OFFSET + THREAD_SIZE;
26878+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26879 #endif
26880 xen_setup_runstate_info(cpu);
26881 xen_setup_timer(cpu);
26882diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26883index 9a95a9c..4f39e774 100644
26884--- a/arch/x86/xen/xen-asm_32.S
26885+++ b/arch/x86/xen/xen-asm_32.S
26886@@ -83,14 +83,14 @@ ENTRY(xen_iret)
26887 ESP_OFFSET=4 # bytes pushed onto stack
26888
26889 /*
26890- * Store vcpu_info pointer for easy access. Do it this way to
26891- * avoid having to reload %fs
26892+ * Store vcpu_info pointer for easy access.
26893 */
26894 #ifdef CONFIG_SMP
26895- GET_THREAD_INFO(%eax)
26896- movl TI_cpu(%eax), %eax
26897- movl __per_cpu_offset(,%eax,4), %eax
26898- mov per_cpu__xen_vcpu(%eax), %eax
26899+ push %fs
26900+ mov $(__KERNEL_PERCPU), %eax
26901+ mov %eax, %fs
26902+ mov PER_CPU_VAR(xen_vcpu), %eax
26903+ pop %fs
26904 #else
26905 movl per_cpu__xen_vcpu, %eax
26906 #endif
26907diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26908index 1a5ff24..a187d40 100644
26909--- a/arch/x86/xen/xen-head.S
26910+++ b/arch/x86/xen/xen-head.S
26911@@ -19,6 +19,17 @@ ENTRY(startup_xen)
26912 #ifdef CONFIG_X86_32
26913 mov %esi,xen_start_info
26914 mov $init_thread_union+THREAD_SIZE,%esp
26915+#ifdef CONFIG_SMP
26916+ movl $cpu_gdt_table,%edi
26917+ movl $__per_cpu_load,%eax
26918+ movw %ax,__KERNEL_PERCPU + 2(%edi)
26919+ rorl $16,%eax
26920+ movb %al,__KERNEL_PERCPU + 4(%edi)
26921+ movb %ah,__KERNEL_PERCPU + 7(%edi)
26922+ movl $__per_cpu_end - 1,%eax
26923+ subl $__per_cpu_start,%eax
26924+ movw %ax,__KERNEL_PERCPU + 0(%edi)
26925+#endif
26926 #else
26927 mov %rsi,xen_start_info
26928 mov $init_thread_union+THREAD_SIZE,%rsp
26929diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26930index f9153a3..51eab3d 100644
26931--- a/arch/x86/xen/xen-ops.h
26932+++ b/arch/x86/xen/xen-ops.h
26933@@ -10,8 +10,6 @@
26934 extern const char xen_hypervisor_callback[];
26935 extern const char xen_failsafe_callback[];
26936
26937-extern void *xen_initial_gdt;
26938-
26939 struct trap_info;
26940 void xen_copy_trap_info(struct trap_info *traps);
26941
26942diff --git a/block/blk-integrity.c b/block/blk-integrity.c
26943index 15c6308..96e83c2 100644
26944--- a/block/blk-integrity.c
26945+++ b/block/blk-integrity.c
26946@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
26947 NULL,
26948 };
26949
26950-static struct sysfs_ops integrity_ops = {
26951+static const struct sysfs_ops integrity_ops = {
26952 .show = &integrity_attr_show,
26953 .store = &integrity_attr_store,
26954 };
26955diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26956index ca56420..f2fc409 100644
26957--- a/block/blk-iopoll.c
26958+++ b/block/blk-iopoll.c
26959@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26960 }
26961 EXPORT_SYMBOL(blk_iopoll_complete);
26962
26963-static void blk_iopoll_softirq(struct softirq_action *h)
26964+static void blk_iopoll_softirq(void)
26965 {
26966 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26967 int rearm = 0, budget = blk_iopoll_budget;
26968diff --git a/block/blk-map.c b/block/blk-map.c
26969index 30a7e51..0aeec6a 100644
26970--- a/block/blk-map.c
26971+++ b/block/blk-map.c
26972@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
26973 * direct dma. else, set up kernel bounce buffers
26974 */
26975 uaddr = (unsigned long) ubuf;
26976- if (blk_rq_aligned(q, ubuf, len) && !map_data)
26977+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
26978 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
26979 else
26980 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
26981@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
26982 for (i = 0; i < iov_count; i++) {
26983 unsigned long uaddr = (unsigned long)iov[i].iov_base;
26984
26985+ if (!iov[i].iov_len)
26986+ return -EINVAL;
26987+
26988 if (uaddr & queue_dma_alignment(q)) {
26989 unaligned = 1;
26990 break;
26991 }
26992- if (!iov[i].iov_len)
26993- return -EINVAL;
26994 }
26995
26996 if (unaligned || (q->dma_pad_mask & len) || map_data)
26997@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26998 if (!len || !kbuf)
26999 return -EINVAL;
27000
27001- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27002+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27003 if (do_copy)
27004 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27005 else
27006diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27007index ee9c216..58d410a 100644
27008--- a/block/blk-softirq.c
27009+++ b/block/blk-softirq.c
27010@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27011 * Softirq action handler - move entries to local list and loop over them
27012 * while passing them to the queue registered handler.
27013 */
27014-static void blk_done_softirq(struct softirq_action *h)
27015+static void blk_done_softirq(void)
27016 {
27017 struct list_head *cpu_list, local_list;
27018
27019diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27020index bb9c5ea..5330d48 100644
27021--- a/block/blk-sysfs.c
27022+++ b/block/blk-sysfs.c
27023@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27024 kmem_cache_free(blk_requestq_cachep, q);
27025 }
27026
27027-static struct sysfs_ops queue_sysfs_ops = {
27028+static const struct sysfs_ops queue_sysfs_ops = {
27029 .show = queue_attr_show,
27030 .store = queue_attr_store,
27031 };
27032diff --git a/block/bsg.c b/block/bsg.c
27033index 7154a7a..08ac2f0 100644
27034--- a/block/bsg.c
27035+++ b/block/bsg.c
27036@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27037 struct sg_io_v4 *hdr, struct bsg_device *bd,
27038 fmode_t has_write_perm)
27039 {
27040+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27041+ unsigned char *cmdptr;
27042+
27043 if (hdr->request_len > BLK_MAX_CDB) {
27044 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27045 if (!rq->cmd)
27046 return -ENOMEM;
27047- }
27048+ cmdptr = rq->cmd;
27049+ } else
27050+ cmdptr = tmpcmd;
27051
27052- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27053+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27054 hdr->request_len))
27055 return -EFAULT;
27056
27057+ if (cmdptr != rq->cmd)
27058+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27059+
27060 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27061 if (blk_verify_command(rq->cmd, has_write_perm))
27062 return -EPERM;
27063@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27064 rq->next_rq = next_rq;
27065 next_rq->cmd_type = rq->cmd_type;
27066
27067- dxferp = (void*)(unsigned long)hdr->din_xferp;
27068+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27069 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27070 hdr->din_xfer_len, GFP_KERNEL);
27071 if (ret)
27072@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27073
27074 if (hdr->dout_xfer_len) {
27075 dxfer_len = hdr->dout_xfer_len;
27076- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27077+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27078 } else if (hdr->din_xfer_len) {
27079 dxfer_len = hdr->din_xfer_len;
27080- dxferp = (void*)(unsigned long)hdr->din_xferp;
27081+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27082 } else
27083 dxfer_len = 0;
27084
27085@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27086 int len = min_t(unsigned int, hdr->max_response_len,
27087 rq->sense_len);
27088
27089- ret = copy_to_user((void*)(unsigned long)hdr->response,
27090+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27091 rq->sense, len);
27092 if (!ret)
27093 hdr->response_len = len;
27094diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27095index 9bd086c..ca1fc22 100644
27096--- a/block/compat_ioctl.c
27097+++ b/block/compat_ioctl.c
27098@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27099 err |= __get_user(f->spec1, &uf->spec1);
27100 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27101 err |= __get_user(name, &uf->name);
27102- f->name = compat_ptr(name);
27103+ f->name = (void __force_kernel *)compat_ptr(name);
27104 if (err) {
27105 err = -EFAULT;
27106 goto out;
27107diff --git a/block/elevator.c b/block/elevator.c
27108index a847046..75a1746 100644
27109--- a/block/elevator.c
27110+++ b/block/elevator.c
27111@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27112 return error;
27113 }
27114
27115-static struct sysfs_ops elv_sysfs_ops = {
27116+static const struct sysfs_ops elv_sysfs_ops = {
27117 .show = elv_attr_show,
27118 .store = elv_attr_store,
27119 };
27120diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27121index 1d5a780..0e2fb8c 100644
27122--- a/block/scsi_ioctl.c
27123+++ b/block/scsi_ioctl.c
27124@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
27125 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27126 struct sg_io_hdr *hdr, fmode_t mode)
27127 {
27128- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27129+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27130+ unsigned char *cmdptr;
27131+
27132+ if (rq->cmd != rq->__cmd)
27133+ cmdptr = rq->cmd;
27134+ else
27135+ cmdptr = tmpcmd;
27136+
27137+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27138 return -EFAULT;
27139+
27140+ if (cmdptr != rq->cmd)
27141+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27142+
27143 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27144 return -EPERM;
27145
27146@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27147 int err;
27148 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27149 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27150+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27151+ unsigned char *cmdptr;
27152
27153 if (!sic)
27154 return -EINVAL;
27155@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27156 */
27157 err = -EFAULT;
27158 rq->cmd_len = cmdlen;
27159- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27160+
27161+ if (rq->cmd != rq->__cmd)
27162+ cmdptr = rq->cmd;
27163+ else
27164+ cmdptr = tmpcmd;
27165+
27166+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27167 goto error;
27168
27169+ if (rq->cmd != cmdptr)
27170+ memcpy(rq->cmd, cmdptr, cmdlen);
27171+
27172 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27173 goto error;
27174
27175diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27176index 3533582..f143117 100644
27177--- a/crypto/cryptd.c
27178+++ b/crypto/cryptd.c
27179@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27180
27181 struct cryptd_blkcipher_request_ctx {
27182 crypto_completion_t complete;
27183-};
27184+} __no_const;
27185
27186 struct cryptd_hash_ctx {
27187 struct crypto_shash *child;
27188diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27189index a90d260..7a9765e 100644
27190--- a/crypto/gf128mul.c
27191+++ b/crypto/gf128mul.c
27192@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27193 for (i = 0; i < 7; ++i)
27194 gf128mul_x_lle(&p[i + 1], &p[i]);
27195
27196- memset(r, 0, sizeof(r));
27197+ memset(r, 0, sizeof(*r));
27198 for (i = 0;;) {
27199 u8 ch = ((u8 *)b)[15 - i];
27200
27201@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27202 for (i = 0; i < 7; ++i)
27203 gf128mul_x_bbe(&p[i + 1], &p[i]);
27204
27205- memset(r, 0, sizeof(r));
27206+ memset(r, 0, sizeof(*r));
27207 for (i = 0;;) {
27208 u8 ch = ((u8 *)b)[i];
27209
27210diff --git a/crypto/serpent.c b/crypto/serpent.c
27211index b651a55..023297d 100644
27212--- a/crypto/serpent.c
27213+++ b/crypto/serpent.c
27214@@ -21,6 +21,7 @@
27215 #include <asm/byteorder.h>
27216 #include <linux/crypto.h>
27217 #include <linux/types.h>
27218+#include <linux/sched.h>
27219
27220 /* Key is padded to the maximum of 256 bits before round key generation.
27221 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27222@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27223 u32 r0,r1,r2,r3,r4;
27224 int i;
27225
27226+ pax_track_stack();
27227+
27228 /* Copy key, add padding */
27229
27230 for (i = 0; i < keylen; ++i)
27231diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27232index 0d2cdb8..d8de48d 100644
27233--- a/drivers/acpi/acpi_pad.c
27234+++ b/drivers/acpi/acpi_pad.c
27235@@ -30,7 +30,7 @@
27236 #include <acpi/acpi_bus.h>
27237 #include <acpi/acpi_drivers.h>
27238
27239-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27240+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27241 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27242 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27243 static DEFINE_MUTEX(isolated_cpus_lock);
27244diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27245index 3f4602b..2e41d36 100644
27246--- a/drivers/acpi/battery.c
27247+++ b/drivers/acpi/battery.c
27248@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27249 }
27250
27251 static struct battery_file {
27252- struct file_operations ops;
27253+ const struct file_operations ops;
27254 mode_t mode;
27255 const char *name;
27256 } acpi_battery_file[] = {
27257diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27258index 7338b6a..82f0257 100644
27259--- a/drivers/acpi/dock.c
27260+++ b/drivers/acpi/dock.c
27261@@ -77,7 +77,7 @@ struct dock_dependent_device {
27262 struct list_head list;
27263 struct list_head hotplug_list;
27264 acpi_handle handle;
27265- struct acpi_dock_ops *ops;
27266+ const struct acpi_dock_ops *ops;
27267 void *context;
27268 };
27269
27270@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27271 * the dock driver after _DCK is executed.
27272 */
27273 int
27274-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27275+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27276 void *context)
27277 {
27278 struct dock_dependent_device *dd;
27279diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27280index 7c1c59e..2993595 100644
27281--- a/drivers/acpi/osl.c
27282+++ b/drivers/acpi/osl.c
27283@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27284 void __iomem *virt_addr;
27285
27286 virt_addr = ioremap(phys_addr, width);
27287+ if (!virt_addr)
27288+ return AE_NO_MEMORY;
27289 if (!value)
27290 value = &dummy;
27291
27292@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27293 void __iomem *virt_addr;
27294
27295 virt_addr = ioremap(phys_addr, width);
27296+ if (!virt_addr)
27297+ return AE_NO_MEMORY;
27298
27299 switch (width) {
27300 case 8:
27301diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27302index c216062..eec10d2 100644
27303--- a/drivers/acpi/power_meter.c
27304+++ b/drivers/acpi/power_meter.c
27305@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27306 return res;
27307
27308 temp /= 1000;
27309- if (temp < 0)
27310- return -EINVAL;
27311
27312 mutex_lock(&resource->lock);
27313 resource->trip[attr->index - 7] = temp;
27314diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27315index d0d25e2..961643d 100644
27316--- a/drivers/acpi/proc.c
27317+++ b/drivers/acpi/proc.c
27318@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27319 size_t count, loff_t * ppos)
27320 {
27321 struct list_head *node, *next;
27322- char strbuf[5];
27323- char str[5] = "";
27324- unsigned int len = count;
27325+ char strbuf[5] = {0};
27326 struct acpi_device *found_dev = NULL;
27327
27328- if (len > 4)
27329- len = 4;
27330- if (len < 0)
27331- return -EFAULT;
27332+ if (count > 4)
27333+ count = 4;
27334
27335- if (copy_from_user(strbuf, buffer, len))
27336+ if (copy_from_user(strbuf, buffer, count))
27337 return -EFAULT;
27338- strbuf[len] = '\0';
27339- sscanf(strbuf, "%s", str);
27340+ strbuf[count] = '\0';
27341
27342 mutex_lock(&acpi_device_lock);
27343 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27344@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27345 if (!dev->wakeup.flags.valid)
27346 continue;
27347
27348- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27349+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27350 dev->wakeup.state.enabled =
27351 dev->wakeup.state.enabled ? 0 : 1;
27352 found_dev = dev;
27353diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27354index 7102474..de8ad22 100644
27355--- a/drivers/acpi/processor_core.c
27356+++ b/drivers/acpi/processor_core.c
27357@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27358 return 0;
27359 }
27360
27361- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27362+ BUG_ON(pr->id >= nr_cpu_ids);
27363
27364 /*
27365 * Buggy BIOS check
27366diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27367index d933980..5761f13 100644
27368--- a/drivers/acpi/sbshc.c
27369+++ b/drivers/acpi/sbshc.c
27370@@ -17,7 +17,7 @@
27371
27372 #define PREFIX "ACPI: "
27373
27374-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27375+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27376 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27377
27378 struct acpi_smb_hc {
27379diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27380index 0458094..6978e7b 100644
27381--- a/drivers/acpi/sleep.c
27382+++ b/drivers/acpi/sleep.c
27383@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27384 }
27385 }
27386
27387-static struct platform_suspend_ops acpi_suspend_ops = {
27388+static const struct platform_suspend_ops acpi_suspend_ops = {
27389 .valid = acpi_suspend_state_valid,
27390 .begin = acpi_suspend_begin,
27391 .prepare_late = acpi_pm_prepare,
27392@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27393 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27394 * been requested.
27395 */
27396-static struct platform_suspend_ops acpi_suspend_ops_old = {
27397+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27398 .valid = acpi_suspend_state_valid,
27399 .begin = acpi_suspend_begin_old,
27400 .prepare_late = acpi_pm_disable_gpes,
27401@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27402 acpi_enable_all_runtime_gpes();
27403 }
27404
27405-static struct platform_hibernation_ops acpi_hibernation_ops = {
27406+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27407 .begin = acpi_hibernation_begin,
27408 .end = acpi_pm_end,
27409 .pre_snapshot = acpi_hibernation_pre_snapshot,
27410@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27411 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27412 * been requested.
27413 */
27414-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27415+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27416 .begin = acpi_hibernation_begin_old,
27417 .end = acpi_pm_end,
27418 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27419diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27420index 05dff63..b662ab7 100644
27421--- a/drivers/acpi/video.c
27422+++ b/drivers/acpi/video.c
27423@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27424 vd->brightness->levels[request_level]);
27425 }
27426
27427-static struct backlight_ops acpi_backlight_ops = {
27428+static const struct backlight_ops acpi_backlight_ops = {
27429 .get_brightness = acpi_video_get_brightness,
27430 .update_status = acpi_video_set_brightness,
27431 };
27432diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27433index 6787aab..23ffb0e 100644
27434--- a/drivers/ata/ahci.c
27435+++ b/drivers/ata/ahci.c
27436@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27437 .sdev_attrs = ahci_sdev_attrs,
27438 };
27439
27440-static struct ata_port_operations ahci_ops = {
27441+static const struct ata_port_operations ahci_ops = {
27442 .inherits = &sata_pmp_port_ops,
27443
27444 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27445@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27446 .port_stop = ahci_port_stop,
27447 };
27448
27449-static struct ata_port_operations ahci_vt8251_ops = {
27450+static const struct ata_port_operations ahci_vt8251_ops = {
27451 .inherits = &ahci_ops,
27452 .hardreset = ahci_vt8251_hardreset,
27453 };
27454
27455-static struct ata_port_operations ahci_p5wdh_ops = {
27456+static const struct ata_port_operations ahci_p5wdh_ops = {
27457 .inherits = &ahci_ops,
27458 .hardreset = ahci_p5wdh_hardreset,
27459 };
27460
27461-static struct ata_port_operations ahci_sb600_ops = {
27462+static const struct ata_port_operations ahci_sb600_ops = {
27463 .inherits = &ahci_ops,
27464 .softreset = ahci_sb600_softreset,
27465 .pmp_softreset = ahci_sb600_softreset,
27466diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27467index 99e7196..4968c77 100644
27468--- a/drivers/ata/ata_generic.c
27469+++ b/drivers/ata/ata_generic.c
27470@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27471 ATA_BMDMA_SHT(DRV_NAME),
27472 };
27473
27474-static struct ata_port_operations generic_port_ops = {
27475+static const struct ata_port_operations generic_port_ops = {
27476 .inherits = &ata_bmdma_port_ops,
27477 .cable_detect = ata_cable_unknown,
27478 .set_mode = generic_set_mode,
27479diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27480index c33591d..000c121 100644
27481--- a/drivers/ata/ata_piix.c
27482+++ b/drivers/ata/ata_piix.c
27483@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27484 ATA_BMDMA_SHT(DRV_NAME),
27485 };
27486
27487-static struct ata_port_operations piix_pata_ops = {
27488+static const struct ata_port_operations piix_pata_ops = {
27489 .inherits = &ata_bmdma32_port_ops,
27490 .cable_detect = ata_cable_40wire,
27491 .set_piomode = piix_set_piomode,
27492@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27493 .prereset = piix_pata_prereset,
27494 };
27495
27496-static struct ata_port_operations piix_vmw_ops = {
27497+static const struct ata_port_operations piix_vmw_ops = {
27498 .inherits = &piix_pata_ops,
27499 .bmdma_status = piix_vmw_bmdma_status,
27500 };
27501
27502-static struct ata_port_operations ich_pata_ops = {
27503+static const struct ata_port_operations ich_pata_ops = {
27504 .inherits = &piix_pata_ops,
27505 .cable_detect = ich_pata_cable_detect,
27506 .set_dmamode = ich_set_dmamode,
27507 };
27508
27509-static struct ata_port_operations piix_sata_ops = {
27510+static const struct ata_port_operations piix_sata_ops = {
27511 .inherits = &ata_bmdma_port_ops,
27512 };
27513
27514-static struct ata_port_operations piix_sidpr_sata_ops = {
27515+static const struct ata_port_operations piix_sidpr_sata_ops = {
27516 .inherits = &piix_sata_ops,
27517 .hardreset = sata_std_hardreset,
27518 .scr_read = piix_sidpr_scr_read,
27519diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27520index b0882cd..c295d65 100644
27521--- a/drivers/ata/libata-acpi.c
27522+++ b/drivers/ata/libata-acpi.c
27523@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27524 ata_acpi_uevent(dev->link->ap, dev, event);
27525 }
27526
27527-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27528+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27529 .handler = ata_acpi_dev_notify_dock,
27530 .uevent = ata_acpi_dev_uevent,
27531 };
27532
27533-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27534+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27535 .handler = ata_acpi_ap_notify_dock,
27536 .uevent = ata_acpi_ap_uevent,
27537 };
27538diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27539index d4f7f99..94f603e 100644
27540--- a/drivers/ata/libata-core.c
27541+++ b/drivers/ata/libata-core.c
27542@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27543 struct ata_port *ap;
27544 unsigned int tag;
27545
27546- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27547+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27548 ap = qc->ap;
27549
27550 qc->flags = 0;
27551@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27552 struct ata_port *ap;
27553 struct ata_link *link;
27554
27555- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27556+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27557 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27558 ap = qc->ap;
27559 link = qc->dev->link;
27560@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27561 * LOCKING:
27562 * None.
27563 */
27564-static void ata_finalize_port_ops(struct ata_port_operations *ops)
27565+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27566 {
27567 static DEFINE_SPINLOCK(lock);
27568 const struct ata_port_operations *cur;
27569@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27570 return;
27571
27572 spin_lock(&lock);
27573+ pax_open_kernel();
27574
27575 for (cur = ops->inherits; cur; cur = cur->inherits) {
27576 void **inherit = (void **)cur;
27577@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27578 if (IS_ERR(*pp))
27579 *pp = NULL;
27580
27581- ops->inherits = NULL;
27582+ *(struct ata_port_operations **)&ops->inherits = NULL;
27583
27584+ pax_close_kernel();
27585 spin_unlock(&lock);
27586 }
27587
27588@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27589 */
27590 /* KILLME - the only user left is ipr */
27591 void ata_host_init(struct ata_host *host, struct device *dev,
27592- unsigned long flags, struct ata_port_operations *ops)
27593+ unsigned long flags, const struct ata_port_operations *ops)
27594 {
27595 spin_lock_init(&host->lock);
27596 host->dev = dev;
27597@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27598 /* truly dummy */
27599 }
27600
27601-struct ata_port_operations ata_dummy_port_ops = {
27602+const struct ata_port_operations ata_dummy_port_ops = {
27603 .qc_prep = ata_noop_qc_prep,
27604 .qc_issue = ata_dummy_qc_issue,
27605 .error_handler = ata_dummy_error_handler,
27606diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27607index e5bdb9b..45a8e72 100644
27608--- a/drivers/ata/libata-eh.c
27609+++ b/drivers/ata/libata-eh.c
27610@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27611 {
27612 struct ata_link *link;
27613
27614+ pax_track_stack();
27615+
27616 ata_for_each_link(link, ap, HOST_FIRST)
27617 ata_eh_link_report(link);
27618 }
27619@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27620 */
27621 void ata_std_error_handler(struct ata_port *ap)
27622 {
27623- struct ata_port_operations *ops = ap->ops;
27624+ const struct ata_port_operations *ops = ap->ops;
27625 ata_reset_fn_t hardreset = ops->hardreset;
27626
27627 /* ignore built-in hardreset if SCR access is not available */
27628diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27629index 51f0ffb..19ce3e3 100644
27630--- a/drivers/ata/libata-pmp.c
27631+++ b/drivers/ata/libata-pmp.c
27632@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27633 */
27634 static int sata_pmp_eh_recover(struct ata_port *ap)
27635 {
27636- struct ata_port_operations *ops = ap->ops;
27637+ const struct ata_port_operations *ops = ap->ops;
27638 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27639 struct ata_link *pmp_link = &ap->link;
27640 struct ata_device *pmp_dev = pmp_link->device;
27641diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27642index d8f35fe..288180a 100644
27643--- a/drivers/ata/pata_acpi.c
27644+++ b/drivers/ata/pata_acpi.c
27645@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27646 ATA_BMDMA_SHT(DRV_NAME),
27647 };
27648
27649-static struct ata_port_operations pacpi_ops = {
27650+static const struct ata_port_operations pacpi_ops = {
27651 .inherits = &ata_bmdma_port_ops,
27652 .qc_issue = pacpi_qc_issue,
27653 .cable_detect = pacpi_cable_detect,
27654diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27655index 9434114..1f2f364 100644
27656--- a/drivers/ata/pata_ali.c
27657+++ b/drivers/ata/pata_ali.c
27658@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27659 * Port operations for PIO only ALi
27660 */
27661
27662-static struct ata_port_operations ali_early_port_ops = {
27663+static const struct ata_port_operations ali_early_port_ops = {
27664 .inherits = &ata_sff_port_ops,
27665 .cable_detect = ata_cable_40wire,
27666 .set_piomode = ali_set_piomode,
27667@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27668 * Port operations for DMA capable ALi without cable
27669 * detect
27670 */
27671-static struct ata_port_operations ali_20_port_ops = {
27672+static const struct ata_port_operations ali_20_port_ops = {
27673 .inherits = &ali_dma_base_ops,
27674 .cable_detect = ata_cable_40wire,
27675 .mode_filter = ali_20_filter,
27676@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27677 /*
27678 * Port operations for DMA capable ALi with cable detect
27679 */
27680-static struct ata_port_operations ali_c2_port_ops = {
27681+static const struct ata_port_operations ali_c2_port_ops = {
27682 .inherits = &ali_dma_base_ops,
27683 .check_atapi_dma = ali_check_atapi_dma,
27684 .cable_detect = ali_c2_cable_detect,
27685@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27686 /*
27687 * Port operations for DMA capable ALi with cable detect
27688 */
27689-static struct ata_port_operations ali_c4_port_ops = {
27690+static const struct ata_port_operations ali_c4_port_ops = {
27691 .inherits = &ali_dma_base_ops,
27692 .check_atapi_dma = ali_check_atapi_dma,
27693 .cable_detect = ali_c2_cable_detect,
27694@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27695 /*
27696 * Port operations for DMA capable ALi with cable detect and LBA48
27697 */
27698-static struct ata_port_operations ali_c5_port_ops = {
27699+static const struct ata_port_operations ali_c5_port_ops = {
27700 .inherits = &ali_dma_base_ops,
27701 .check_atapi_dma = ali_check_atapi_dma,
27702 .dev_config = ali_warn_atapi_dma,
27703diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27704index 567f3f7..c8ee0da 100644
27705--- a/drivers/ata/pata_amd.c
27706+++ b/drivers/ata/pata_amd.c
27707@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27708 .prereset = amd_pre_reset,
27709 };
27710
27711-static struct ata_port_operations amd33_port_ops = {
27712+static const struct ata_port_operations amd33_port_ops = {
27713 .inherits = &amd_base_port_ops,
27714 .cable_detect = ata_cable_40wire,
27715 .set_piomode = amd33_set_piomode,
27716 .set_dmamode = amd33_set_dmamode,
27717 };
27718
27719-static struct ata_port_operations amd66_port_ops = {
27720+static const struct ata_port_operations amd66_port_ops = {
27721 .inherits = &amd_base_port_ops,
27722 .cable_detect = ata_cable_unknown,
27723 .set_piomode = amd66_set_piomode,
27724 .set_dmamode = amd66_set_dmamode,
27725 };
27726
27727-static struct ata_port_operations amd100_port_ops = {
27728+static const struct ata_port_operations amd100_port_ops = {
27729 .inherits = &amd_base_port_ops,
27730 .cable_detect = ata_cable_unknown,
27731 .set_piomode = amd100_set_piomode,
27732 .set_dmamode = amd100_set_dmamode,
27733 };
27734
27735-static struct ata_port_operations amd133_port_ops = {
27736+static const struct ata_port_operations amd133_port_ops = {
27737 .inherits = &amd_base_port_ops,
27738 .cable_detect = amd_cable_detect,
27739 .set_piomode = amd133_set_piomode,
27740@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27741 .host_stop = nv_host_stop,
27742 };
27743
27744-static struct ata_port_operations nv100_port_ops = {
27745+static const struct ata_port_operations nv100_port_ops = {
27746 .inherits = &nv_base_port_ops,
27747 .set_piomode = nv100_set_piomode,
27748 .set_dmamode = nv100_set_dmamode,
27749 };
27750
27751-static struct ata_port_operations nv133_port_ops = {
27752+static const struct ata_port_operations nv133_port_ops = {
27753 .inherits = &nv_base_port_ops,
27754 .set_piomode = nv133_set_piomode,
27755 .set_dmamode = nv133_set_dmamode,
27756diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
27757index d332cfd..4b7eaae 100644
27758--- a/drivers/ata/pata_artop.c
27759+++ b/drivers/ata/pata_artop.c
27760@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
27761 ATA_BMDMA_SHT(DRV_NAME),
27762 };
27763
27764-static struct ata_port_operations artop6210_ops = {
27765+static const struct ata_port_operations artop6210_ops = {
27766 .inherits = &ata_bmdma_port_ops,
27767 .cable_detect = ata_cable_40wire,
27768 .set_piomode = artop6210_set_piomode,
27769@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
27770 .qc_defer = artop6210_qc_defer,
27771 };
27772
27773-static struct ata_port_operations artop6260_ops = {
27774+static const struct ata_port_operations artop6260_ops = {
27775 .inherits = &ata_bmdma_port_ops,
27776 .cable_detect = artop6260_cable_detect,
27777 .set_piomode = artop6260_set_piomode,
27778diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
27779index 5c129f9..7bb7ccb 100644
27780--- a/drivers/ata/pata_at32.c
27781+++ b/drivers/ata/pata_at32.c
27782@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
27783 ATA_PIO_SHT(DRV_NAME),
27784 };
27785
27786-static struct ata_port_operations at32_port_ops = {
27787+static const struct ata_port_operations at32_port_ops = {
27788 .inherits = &ata_sff_port_ops,
27789 .cable_detect = ata_cable_40wire,
27790 .set_piomode = pata_at32_set_piomode,
27791diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
27792index 41c94b1..829006d 100644
27793--- a/drivers/ata/pata_at91.c
27794+++ b/drivers/ata/pata_at91.c
27795@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
27796 ATA_PIO_SHT(DRV_NAME),
27797 };
27798
27799-static struct ata_port_operations pata_at91_port_ops = {
27800+static const struct ata_port_operations pata_at91_port_ops = {
27801 .inherits = &ata_sff_port_ops,
27802
27803 .sff_data_xfer = pata_at91_data_xfer_noirq,
27804diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
27805index ae4454d..d391eb4 100644
27806--- a/drivers/ata/pata_atiixp.c
27807+++ b/drivers/ata/pata_atiixp.c
27808@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
27809 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27810 };
27811
27812-static struct ata_port_operations atiixp_port_ops = {
27813+static const struct ata_port_operations atiixp_port_ops = {
27814 .inherits = &ata_bmdma_port_ops,
27815
27816 .qc_prep = ata_sff_dumb_qc_prep,
27817diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
27818index 6fe7ded..2a425dc 100644
27819--- a/drivers/ata/pata_atp867x.c
27820+++ b/drivers/ata/pata_atp867x.c
27821@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
27822 ATA_BMDMA_SHT(DRV_NAME),
27823 };
27824
27825-static struct ata_port_operations atp867x_ops = {
27826+static const struct ata_port_operations atp867x_ops = {
27827 .inherits = &ata_bmdma_port_ops,
27828 .cable_detect = atp867x_cable_detect,
27829 .set_piomode = atp867x_set_piomode,
27830diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
27831index c4b47a3..b27a367 100644
27832--- a/drivers/ata/pata_bf54x.c
27833+++ b/drivers/ata/pata_bf54x.c
27834@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
27835 .dma_boundary = ATA_DMA_BOUNDARY,
27836 };
27837
27838-static struct ata_port_operations bfin_pata_ops = {
27839+static const struct ata_port_operations bfin_pata_ops = {
27840 .inherits = &ata_sff_port_ops,
27841
27842 .set_piomode = bfin_set_piomode,
27843diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
27844index 5acf9fa..84248be 100644
27845--- a/drivers/ata/pata_cmd640.c
27846+++ b/drivers/ata/pata_cmd640.c
27847@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
27848 ATA_BMDMA_SHT(DRV_NAME),
27849 };
27850
27851-static struct ata_port_operations cmd640_port_ops = {
27852+static const struct ata_port_operations cmd640_port_ops = {
27853 .inherits = &ata_bmdma_port_ops,
27854 /* In theory xfer_noirq is not needed once we kill the prefetcher */
27855 .sff_data_xfer = ata_sff_data_xfer_noirq,
27856diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
27857index ccd2694..c869c3d 100644
27858--- a/drivers/ata/pata_cmd64x.c
27859+++ b/drivers/ata/pata_cmd64x.c
27860@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
27861 .set_dmamode = cmd64x_set_dmamode,
27862 };
27863
27864-static struct ata_port_operations cmd64x_port_ops = {
27865+static const struct ata_port_operations cmd64x_port_ops = {
27866 .inherits = &cmd64x_base_ops,
27867 .cable_detect = ata_cable_40wire,
27868 };
27869
27870-static struct ata_port_operations cmd646r1_port_ops = {
27871+static const struct ata_port_operations cmd646r1_port_ops = {
27872 .inherits = &cmd64x_base_ops,
27873 .bmdma_stop = cmd646r1_bmdma_stop,
27874 .cable_detect = ata_cable_40wire,
27875 };
27876
27877-static struct ata_port_operations cmd648_port_ops = {
27878+static const struct ata_port_operations cmd648_port_ops = {
27879 .inherits = &cmd64x_base_ops,
27880 .bmdma_stop = cmd648_bmdma_stop,
27881 .cable_detect = cmd648_cable_detect,
27882diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
27883index 0df83cf..d7595b0 100644
27884--- a/drivers/ata/pata_cs5520.c
27885+++ b/drivers/ata/pata_cs5520.c
27886@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
27887 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27888 };
27889
27890-static struct ata_port_operations cs5520_port_ops = {
27891+static const struct ata_port_operations cs5520_port_ops = {
27892 .inherits = &ata_bmdma_port_ops,
27893 .qc_prep = ata_sff_dumb_qc_prep,
27894 .cable_detect = ata_cable_40wire,
27895diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
27896index c974b05..6d26b11 100644
27897--- a/drivers/ata/pata_cs5530.c
27898+++ b/drivers/ata/pata_cs5530.c
27899@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
27900 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27901 };
27902
27903-static struct ata_port_operations cs5530_port_ops = {
27904+static const struct ata_port_operations cs5530_port_ops = {
27905 .inherits = &ata_bmdma_port_ops,
27906
27907 .qc_prep = ata_sff_dumb_qc_prep,
27908diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
27909index 403f561..aacd26b 100644
27910--- a/drivers/ata/pata_cs5535.c
27911+++ b/drivers/ata/pata_cs5535.c
27912@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
27913 ATA_BMDMA_SHT(DRV_NAME),
27914 };
27915
27916-static struct ata_port_operations cs5535_port_ops = {
27917+static const struct ata_port_operations cs5535_port_ops = {
27918 .inherits = &ata_bmdma_port_ops,
27919 .cable_detect = cs5535_cable_detect,
27920 .set_piomode = cs5535_set_piomode,
27921diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
27922index 6da4cb4..de24a25 100644
27923--- a/drivers/ata/pata_cs5536.c
27924+++ b/drivers/ata/pata_cs5536.c
27925@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
27926 ATA_BMDMA_SHT(DRV_NAME),
27927 };
27928
27929-static struct ata_port_operations cs5536_port_ops = {
27930+static const struct ata_port_operations cs5536_port_ops = {
27931 .inherits = &ata_bmdma_port_ops,
27932 .cable_detect = cs5536_cable_detect,
27933 .set_piomode = cs5536_set_piomode,
27934diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
27935index 8fb040b..b16a9c9 100644
27936--- a/drivers/ata/pata_cypress.c
27937+++ b/drivers/ata/pata_cypress.c
27938@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
27939 ATA_BMDMA_SHT(DRV_NAME),
27940 };
27941
27942-static struct ata_port_operations cy82c693_port_ops = {
27943+static const struct ata_port_operations cy82c693_port_ops = {
27944 .inherits = &ata_bmdma_port_ops,
27945 .cable_detect = ata_cable_40wire,
27946 .set_piomode = cy82c693_set_piomode,
27947diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
27948index 2a6412f..555ee11 100644
27949--- a/drivers/ata/pata_efar.c
27950+++ b/drivers/ata/pata_efar.c
27951@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
27952 ATA_BMDMA_SHT(DRV_NAME),
27953 };
27954
27955-static struct ata_port_operations efar_ops = {
27956+static const struct ata_port_operations efar_ops = {
27957 .inherits = &ata_bmdma_port_ops,
27958 .cable_detect = efar_cable_detect,
27959 .set_piomode = efar_set_piomode,
27960diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
27961index b9d8836..0b92030 100644
27962--- a/drivers/ata/pata_hpt366.c
27963+++ b/drivers/ata/pata_hpt366.c
27964@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
27965 * Configuration for HPT366/68
27966 */
27967
27968-static struct ata_port_operations hpt366_port_ops = {
27969+static const struct ata_port_operations hpt366_port_ops = {
27970 .inherits = &ata_bmdma_port_ops,
27971 .cable_detect = hpt36x_cable_detect,
27972 .mode_filter = hpt366_filter,
27973diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
27974index 5af7f19..00c4980 100644
27975--- a/drivers/ata/pata_hpt37x.c
27976+++ b/drivers/ata/pata_hpt37x.c
27977@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
27978 * Configuration for HPT370
27979 */
27980
27981-static struct ata_port_operations hpt370_port_ops = {
27982+static const struct ata_port_operations hpt370_port_ops = {
27983 .inherits = &ata_bmdma_port_ops,
27984
27985 .bmdma_stop = hpt370_bmdma_stop,
27986@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
27987 * Configuration for HPT370A. Close to 370 but less filters
27988 */
27989
27990-static struct ata_port_operations hpt370a_port_ops = {
27991+static const struct ata_port_operations hpt370a_port_ops = {
27992 .inherits = &hpt370_port_ops,
27993 .mode_filter = hpt370a_filter,
27994 };
27995@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
27996 * and DMA mode setting functionality.
27997 */
27998
27999-static struct ata_port_operations hpt372_port_ops = {
28000+static const struct ata_port_operations hpt372_port_ops = {
28001 .inherits = &ata_bmdma_port_ops,
28002
28003 .bmdma_stop = hpt37x_bmdma_stop,
28004@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28005 * but we have a different cable detection procedure for function 1.
28006 */
28007
28008-static struct ata_port_operations hpt374_fn1_port_ops = {
28009+static const struct ata_port_operations hpt374_fn1_port_ops = {
28010 .inherits = &hpt372_port_ops,
28011 .prereset = hpt374_fn1_pre_reset,
28012 };
28013diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28014index 100f227..2e39382 100644
28015--- a/drivers/ata/pata_hpt3x2n.c
28016+++ b/drivers/ata/pata_hpt3x2n.c
28017@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28018 * Configuration for HPT3x2n.
28019 */
28020
28021-static struct ata_port_operations hpt3x2n_port_ops = {
28022+static const struct ata_port_operations hpt3x2n_port_ops = {
28023 .inherits = &ata_bmdma_port_ops,
28024
28025 .bmdma_stop = hpt3x2n_bmdma_stop,
28026diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28027index 7e31025..6fca8f4 100644
28028--- a/drivers/ata/pata_hpt3x3.c
28029+++ b/drivers/ata/pata_hpt3x3.c
28030@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28031 ATA_BMDMA_SHT(DRV_NAME),
28032 };
28033
28034-static struct ata_port_operations hpt3x3_port_ops = {
28035+static const struct ata_port_operations hpt3x3_port_ops = {
28036 .inherits = &ata_bmdma_port_ops,
28037 .cable_detect = ata_cable_40wire,
28038 .set_piomode = hpt3x3_set_piomode,
28039diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28040index b663b7f..9a26c2a 100644
28041--- a/drivers/ata/pata_icside.c
28042+++ b/drivers/ata/pata_icside.c
28043@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28044 }
28045 }
28046
28047-static struct ata_port_operations pata_icside_port_ops = {
28048+static const struct ata_port_operations pata_icside_port_ops = {
28049 .inherits = &ata_sff_port_ops,
28050 /* no need to build any PRD tables for DMA */
28051 .qc_prep = ata_noop_qc_prep,
28052diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28053index 4bceb88..457dfb6 100644
28054--- a/drivers/ata/pata_isapnp.c
28055+++ b/drivers/ata/pata_isapnp.c
28056@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28057 ATA_PIO_SHT(DRV_NAME),
28058 };
28059
28060-static struct ata_port_operations isapnp_port_ops = {
28061+static const struct ata_port_operations isapnp_port_ops = {
28062 .inherits = &ata_sff_port_ops,
28063 .cable_detect = ata_cable_40wire,
28064 };
28065
28066-static struct ata_port_operations isapnp_noalt_port_ops = {
28067+static const struct ata_port_operations isapnp_noalt_port_ops = {
28068 .inherits = &ata_sff_port_ops,
28069 .cable_detect = ata_cable_40wire,
28070 /* No altstatus so we don't want to use the lost interrupt poll */
28071diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28072index f156da8..24976e2 100644
28073--- a/drivers/ata/pata_it8213.c
28074+++ b/drivers/ata/pata_it8213.c
28075@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28076 };
28077
28078
28079-static struct ata_port_operations it8213_ops = {
28080+static const struct ata_port_operations it8213_ops = {
28081 .inherits = &ata_bmdma_port_ops,
28082 .cable_detect = it8213_cable_detect,
28083 .set_piomode = it8213_set_piomode,
28084diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28085index 188bc2f..ca9e785 100644
28086--- a/drivers/ata/pata_it821x.c
28087+++ b/drivers/ata/pata_it821x.c
28088@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28089 ATA_BMDMA_SHT(DRV_NAME),
28090 };
28091
28092-static struct ata_port_operations it821x_smart_port_ops = {
28093+static const struct ata_port_operations it821x_smart_port_ops = {
28094 .inherits = &ata_bmdma_port_ops,
28095
28096 .check_atapi_dma= it821x_check_atapi_dma,
28097@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28098 .port_start = it821x_port_start,
28099 };
28100
28101-static struct ata_port_operations it821x_passthru_port_ops = {
28102+static const struct ata_port_operations it821x_passthru_port_ops = {
28103 .inherits = &ata_bmdma_port_ops,
28104
28105 .check_atapi_dma= it821x_check_atapi_dma,
28106@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28107 .port_start = it821x_port_start,
28108 };
28109
28110-static struct ata_port_operations it821x_rdc_port_ops = {
28111+static const struct ata_port_operations it821x_rdc_port_ops = {
28112 .inherits = &ata_bmdma_port_ops,
28113
28114 .check_atapi_dma= it821x_check_atapi_dma,
28115diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28116index ba54b08..4b952b7 100644
28117--- a/drivers/ata/pata_ixp4xx_cf.c
28118+++ b/drivers/ata/pata_ixp4xx_cf.c
28119@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28120 ATA_PIO_SHT(DRV_NAME),
28121 };
28122
28123-static struct ata_port_operations ixp4xx_port_ops = {
28124+static const struct ata_port_operations ixp4xx_port_ops = {
28125 .inherits = &ata_sff_port_ops,
28126 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28127 .cable_detect = ata_cable_40wire,
28128diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28129index 3a1474a..434b0ff 100644
28130--- a/drivers/ata/pata_jmicron.c
28131+++ b/drivers/ata/pata_jmicron.c
28132@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28133 ATA_BMDMA_SHT(DRV_NAME),
28134 };
28135
28136-static struct ata_port_operations jmicron_ops = {
28137+static const struct ata_port_operations jmicron_ops = {
28138 .inherits = &ata_bmdma_port_ops,
28139 .prereset = jmicron_pre_reset,
28140 };
28141diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28142index 6932e56..220e71d 100644
28143--- a/drivers/ata/pata_legacy.c
28144+++ b/drivers/ata/pata_legacy.c
28145@@ -106,7 +106,7 @@ struct legacy_probe {
28146
28147 struct legacy_controller {
28148 const char *name;
28149- struct ata_port_operations *ops;
28150+ const struct ata_port_operations *ops;
28151 unsigned int pio_mask;
28152 unsigned int flags;
28153 unsigned int pflags;
28154@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28155 * pio_mask as well.
28156 */
28157
28158-static struct ata_port_operations simple_port_ops = {
28159+static const struct ata_port_operations simple_port_ops = {
28160 .inherits = &legacy_base_port_ops,
28161 .sff_data_xfer = ata_sff_data_xfer_noirq,
28162 };
28163
28164-static struct ata_port_operations legacy_port_ops = {
28165+static const struct ata_port_operations legacy_port_ops = {
28166 .inherits = &legacy_base_port_ops,
28167 .sff_data_xfer = ata_sff_data_xfer_noirq,
28168 .set_mode = legacy_set_mode,
28169@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28170 return buflen;
28171 }
28172
28173-static struct ata_port_operations pdc20230_port_ops = {
28174+static const struct ata_port_operations pdc20230_port_ops = {
28175 .inherits = &legacy_base_port_ops,
28176 .set_piomode = pdc20230_set_piomode,
28177 .sff_data_xfer = pdc_data_xfer_vlb,
28178@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28179 ioread8(ap->ioaddr.status_addr);
28180 }
28181
28182-static struct ata_port_operations ht6560a_port_ops = {
28183+static const struct ata_port_operations ht6560a_port_ops = {
28184 .inherits = &legacy_base_port_ops,
28185 .set_piomode = ht6560a_set_piomode,
28186 };
28187@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28188 ioread8(ap->ioaddr.status_addr);
28189 }
28190
28191-static struct ata_port_operations ht6560b_port_ops = {
28192+static const struct ata_port_operations ht6560b_port_ops = {
28193 .inherits = &legacy_base_port_ops,
28194 .set_piomode = ht6560b_set_piomode,
28195 };
28196@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28197 }
28198
28199
28200-static struct ata_port_operations opti82c611a_port_ops = {
28201+static const struct ata_port_operations opti82c611a_port_ops = {
28202 .inherits = &legacy_base_port_ops,
28203 .set_piomode = opti82c611a_set_piomode,
28204 };
28205@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28206 return ata_sff_qc_issue(qc);
28207 }
28208
28209-static struct ata_port_operations opti82c46x_port_ops = {
28210+static const struct ata_port_operations opti82c46x_port_ops = {
28211 .inherits = &legacy_base_port_ops,
28212 .set_piomode = opti82c46x_set_piomode,
28213 .qc_issue = opti82c46x_qc_issue,
28214@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28215 return 0;
28216 }
28217
28218-static struct ata_port_operations qdi6500_port_ops = {
28219+static const struct ata_port_operations qdi6500_port_ops = {
28220 .inherits = &legacy_base_port_ops,
28221 .set_piomode = qdi6500_set_piomode,
28222 .qc_issue = qdi_qc_issue,
28223 .sff_data_xfer = vlb32_data_xfer,
28224 };
28225
28226-static struct ata_port_operations qdi6580_port_ops = {
28227+static const struct ata_port_operations qdi6580_port_ops = {
28228 .inherits = &legacy_base_port_ops,
28229 .set_piomode = qdi6580_set_piomode,
28230 .sff_data_xfer = vlb32_data_xfer,
28231 };
28232
28233-static struct ata_port_operations qdi6580dp_port_ops = {
28234+static const struct ata_port_operations qdi6580dp_port_ops = {
28235 .inherits = &legacy_base_port_ops,
28236 .set_piomode = qdi6580dp_set_piomode,
28237 .sff_data_xfer = vlb32_data_xfer,
28238@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28239 return 0;
28240 }
28241
28242-static struct ata_port_operations winbond_port_ops = {
28243+static const struct ata_port_operations winbond_port_ops = {
28244 .inherits = &legacy_base_port_ops,
28245 .set_piomode = winbond_set_piomode,
28246 .sff_data_xfer = vlb32_data_xfer,
28247@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28248 int pio_modes = controller->pio_mask;
28249 unsigned long io = probe->port;
28250 u32 mask = (1 << probe->slot);
28251- struct ata_port_operations *ops = controller->ops;
28252+ const struct ata_port_operations *ops = controller->ops;
28253 struct legacy_data *ld = &legacy_data[probe->slot];
28254 struct ata_host *host = NULL;
28255 struct ata_port *ap;
28256diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28257index 2096fb7..4d090fc 100644
28258--- a/drivers/ata/pata_marvell.c
28259+++ b/drivers/ata/pata_marvell.c
28260@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28261 ATA_BMDMA_SHT(DRV_NAME),
28262 };
28263
28264-static struct ata_port_operations marvell_ops = {
28265+static const struct ata_port_operations marvell_ops = {
28266 .inherits = &ata_bmdma_port_ops,
28267 .cable_detect = marvell_cable_detect,
28268 .prereset = marvell_pre_reset,
28269diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28270index 99d41be..7d56aa8 100644
28271--- a/drivers/ata/pata_mpc52xx.c
28272+++ b/drivers/ata/pata_mpc52xx.c
28273@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28274 ATA_PIO_SHT(DRV_NAME),
28275 };
28276
28277-static struct ata_port_operations mpc52xx_ata_port_ops = {
28278+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28279 .inherits = &ata_bmdma_port_ops,
28280 .sff_dev_select = mpc52xx_ata_dev_select,
28281 .set_piomode = mpc52xx_ata_set_piomode,
28282diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28283index b21f002..0a27e7f 100644
28284--- a/drivers/ata/pata_mpiix.c
28285+++ b/drivers/ata/pata_mpiix.c
28286@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28287 ATA_PIO_SHT(DRV_NAME),
28288 };
28289
28290-static struct ata_port_operations mpiix_port_ops = {
28291+static const struct ata_port_operations mpiix_port_ops = {
28292 .inherits = &ata_sff_port_ops,
28293 .qc_issue = mpiix_qc_issue,
28294 .cable_detect = ata_cable_40wire,
28295diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28296index f0d52f7..89c3be3 100644
28297--- a/drivers/ata/pata_netcell.c
28298+++ b/drivers/ata/pata_netcell.c
28299@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28300 ATA_BMDMA_SHT(DRV_NAME),
28301 };
28302
28303-static struct ata_port_operations netcell_ops = {
28304+static const struct ata_port_operations netcell_ops = {
28305 .inherits = &ata_bmdma_port_ops,
28306 .cable_detect = ata_cable_80wire,
28307 .read_id = netcell_read_id,
28308diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28309index dd53a66..a3f4317 100644
28310--- a/drivers/ata/pata_ninja32.c
28311+++ b/drivers/ata/pata_ninja32.c
28312@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28313 ATA_BMDMA_SHT(DRV_NAME),
28314 };
28315
28316-static struct ata_port_operations ninja32_port_ops = {
28317+static const struct ata_port_operations ninja32_port_ops = {
28318 .inherits = &ata_bmdma_port_ops,
28319 .sff_dev_select = ninja32_dev_select,
28320 .cable_detect = ata_cable_40wire,
28321diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28322index ca53fac..9aa93ef 100644
28323--- a/drivers/ata/pata_ns87410.c
28324+++ b/drivers/ata/pata_ns87410.c
28325@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28326 ATA_PIO_SHT(DRV_NAME),
28327 };
28328
28329-static struct ata_port_operations ns87410_port_ops = {
28330+static const struct ata_port_operations ns87410_port_ops = {
28331 .inherits = &ata_sff_port_ops,
28332 .qc_issue = ns87410_qc_issue,
28333 .cable_detect = ata_cable_40wire,
28334diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28335index 773b159..55f454e 100644
28336--- a/drivers/ata/pata_ns87415.c
28337+++ b/drivers/ata/pata_ns87415.c
28338@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28339 }
28340 #endif /* 87560 SuperIO Support */
28341
28342-static struct ata_port_operations ns87415_pata_ops = {
28343+static const struct ata_port_operations ns87415_pata_ops = {
28344 .inherits = &ata_bmdma_port_ops,
28345
28346 .check_atapi_dma = ns87415_check_atapi_dma,
28347@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28348 };
28349
28350 #if defined(CONFIG_SUPERIO)
28351-static struct ata_port_operations ns87560_pata_ops = {
28352+static const struct ata_port_operations ns87560_pata_ops = {
28353 .inherits = &ns87415_pata_ops,
28354 .sff_tf_read = ns87560_tf_read,
28355 .sff_check_status = ns87560_check_status,
28356diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28357index d6f6956..639295b 100644
28358--- a/drivers/ata/pata_octeon_cf.c
28359+++ b/drivers/ata/pata_octeon_cf.c
28360@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28361 return 0;
28362 }
28363
28364+/* cannot be const */
28365 static struct ata_port_operations octeon_cf_ops = {
28366 .inherits = &ata_sff_port_ops,
28367 .check_atapi_dma = octeon_cf_check_atapi_dma,
28368diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28369index 84ac503..adee1cd 100644
28370--- a/drivers/ata/pata_oldpiix.c
28371+++ b/drivers/ata/pata_oldpiix.c
28372@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28373 ATA_BMDMA_SHT(DRV_NAME),
28374 };
28375
28376-static struct ata_port_operations oldpiix_pata_ops = {
28377+static const struct ata_port_operations oldpiix_pata_ops = {
28378 .inherits = &ata_bmdma_port_ops,
28379 .qc_issue = oldpiix_qc_issue,
28380 .cable_detect = ata_cable_40wire,
28381diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28382index 99eddda..3a4c0aa 100644
28383--- a/drivers/ata/pata_opti.c
28384+++ b/drivers/ata/pata_opti.c
28385@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28386 ATA_PIO_SHT(DRV_NAME),
28387 };
28388
28389-static struct ata_port_operations opti_port_ops = {
28390+static const struct ata_port_operations opti_port_ops = {
28391 .inherits = &ata_sff_port_ops,
28392 .cable_detect = ata_cable_40wire,
28393 .set_piomode = opti_set_piomode,
28394diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28395index 86885a4..8e9968d 100644
28396--- a/drivers/ata/pata_optidma.c
28397+++ b/drivers/ata/pata_optidma.c
28398@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28399 ATA_BMDMA_SHT(DRV_NAME),
28400 };
28401
28402-static struct ata_port_operations optidma_port_ops = {
28403+static const struct ata_port_operations optidma_port_ops = {
28404 .inherits = &ata_bmdma_port_ops,
28405 .cable_detect = ata_cable_40wire,
28406 .set_piomode = optidma_set_pio_mode,
28407@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28408 .prereset = optidma_pre_reset,
28409 };
28410
28411-static struct ata_port_operations optiplus_port_ops = {
28412+static const struct ata_port_operations optiplus_port_ops = {
28413 .inherits = &optidma_port_ops,
28414 .set_piomode = optiplus_set_pio_mode,
28415 .set_dmamode = optiplus_set_dma_mode,
28416diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28417index 11fb4cc..1a14022 100644
28418--- a/drivers/ata/pata_palmld.c
28419+++ b/drivers/ata/pata_palmld.c
28420@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28421 ATA_PIO_SHT(DRV_NAME),
28422 };
28423
28424-static struct ata_port_operations palmld_port_ops = {
28425+static const struct ata_port_operations palmld_port_ops = {
28426 .inherits = &ata_sff_port_ops,
28427 .sff_data_xfer = ata_sff_data_xfer_noirq,
28428 .cable_detect = ata_cable_40wire,
28429diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28430index dc99e26..7f4b1e4 100644
28431--- a/drivers/ata/pata_pcmcia.c
28432+++ b/drivers/ata/pata_pcmcia.c
28433@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28434 ATA_PIO_SHT(DRV_NAME),
28435 };
28436
28437-static struct ata_port_operations pcmcia_port_ops = {
28438+static const struct ata_port_operations pcmcia_port_ops = {
28439 .inherits = &ata_sff_port_ops,
28440 .sff_data_xfer = ata_sff_data_xfer_noirq,
28441 .cable_detect = ata_cable_40wire,
28442 .set_mode = pcmcia_set_mode,
28443 };
28444
28445-static struct ata_port_operations pcmcia_8bit_port_ops = {
28446+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28447 .inherits = &ata_sff_port_ops,
28448 .sff_data_xfer = ata_data_xfer_8bit,
28449 .cable_detect = ata_cable_40wire,
28450@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28451 unsigned long io_base, ctl_base;
28452 void __iomem *io_addr, *ctl_addr;
28453 int n_ports = 1;
28454- struct ata_port_operations *ops = &pcmcia_port_ops;
28455+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28456
28457 info = kzalloc(sizeof(*info), GFP_KERNEL);
28458 if (info == NULL)
28459diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28460index ca5cad0..3a1f125 100644
28461--- a/drivers/ata/pata_pdc2027x.c
28462+++ b/drivers/ata/pata_pdc2027x.c
28463@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28464 ATA_BMDMA_SHT(DRV_NAME),
28465 };
28466
28467-static struct ata_port_operations pdc2027x_pata100_ops = {
28468+static const struct ata_port_operations pdc2027x_pata100_ops = {
28469 .inherits = &ata_bmdma_port_ops,
28470 .check_atapi_dma = pdc2027x_check_atapi_dma,
28471 .cable_detect = pdc2027x_cable_detect,
28472 .prereset = pdc2027x_prereset,
28473 };
28474
28475-static struct ata_port_operations pdc2027x_pata133_ops = {
28476+static const struct ata_port_operations pdc2027x_pata133_ops = {
28477 .inherits = &pdc2027x_pata100_ops,
28478 .mode_filter = pdc2027x_mode_filter,
28479 .set_piomode = pdc2027x_set_piomode,
28480diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28481index 2911120..4bf62aa 100644
28482--- a/drivers/ata/pata_pdc202xx_old.c
28483+++ b/drivers/ata/pata_pdc202xx_old.c
28484@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28485 ATA_BMDMA_SHT(DRV_NAME),
28486 };
28487
28488-static struct ata_port_operations pdc2024x_port_ops = {
28489+static const struct ata_port_operations pdc2024x_port_ops = {
28490 .inherits = &ata_bmdma_port_ops,
28491
28492 .cable_detect = ata_cable_40wire,
28493@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28494 .sff_exec_command = pdc202xx_exec_command,
28495 };
28496
28497-static struct ata_port_operations pdc2026x_port_ops = {
28498+static const struct ata_port_operations pdc2026x_port_ops = {
28499 .inherits = &pdc2024x_port_ops,
28500
28501 .check_atapi_dma = pdc2026x_check_atapi_dma,
28502diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28503index 3f6ebc6..a18c358 100644
28504--- a/drivers/ata/pata_platform.c
28505+++ b/drivers/ata/pata_platform.c
28506@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28507 ATA_PIO_SHT(DRV_NAME),
28508 };
28509
28510-static struct ata_port_operations pata_platform_port_ops = {
28511+static const struct ata_port_operations pata_platform_port_ops = {
28512 .inherits = &ata_sff_port_ops,
28513 .sff_data_xfer = ata_sff_data_xfer_noirq,
28514 .cable_detect = ata_cable_unknown,
28515diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28516index 45879dc..165a9f9 100644
28517--- a/drivers/ata/pata_qdi.c
28518+++ b/drivers/ata/pata_qdi.c
28519@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28520 ATA_PIO_SHT(DRV_NAME),
28521 };
28522
28523-static struct ata_port_operations qdi6500_port_ops = {
28524+static const struct ata_port_operations qdi6500_port_ops = {
28525 .inherits = &ata_sff_port_ops,
28526 .qc_issue = qdi_qc_issue,
28527 .sff_data_xfer = qdi_data_xfer,
28528@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28529 .set_piomode = qdi6500_set_piomode,
28530 };
28531
28532-static struct ata_port_operations qdi6580_port_ops = {
28533+static const struct ata_port_operations qdi6580_port_ops = {
28534 .inherits = &qdi6500_port_ops,
28535 .set_piomode = qdi6580_set_piomode,
28536 };
28537diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28538index 4401b33..716c5cc 100644
28539--- a/drivers/ata/pata_radisys.c
28540+++ b/drivers/ata/pata_radisys.c
28541@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28542 ATA_BMDMA_SHT(DRV_NAME),
28543 };
28544
28545-static struct ata_port_operations radisys_pata_ops = {
28546+static const struct ata_port_operations radisys_pata_ops = {
28547 .inherits = &ata_bmdma_port_ops,
28548 .qc_issue = radisys_qc_issue,
28549 .cable_detect = ata_cable_unknown,
28550diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28551index 45f1e10..fab6bca 100644
28552--- a/drivers/ata/pata_rb532_cf.c
28553+++ b/drivers/ata/pata_rb532_cf.c
28554@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28555 return IRQ_HANDLED;
28556 }
28557
28558-static struct ata_port_operations rb532_pata_port_ops = {
28559+static const struct ata_port_operations rb532_pata_port_ops = {
28560 .inherits = &ata_sff_port_ops,
28561 .sff_data_xfer = ata_sff_data_xfer32,
28562 };
28563diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28564index c843a1e..b5853c3 100644
28565--- a/drivers/ata/pata_rdc.c
28566+++ b/drivers/ata/pata_rdc.c
28567@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28568 pci_write_config_byte(dev, 0x48, udma_enable);
28569 }
28570
28571-static struct ata_port_operations rdc_pata_ops = {
28572+static const struct ata_port_operations rdc_pata_ops = {
28573 .inherits = &ata_bmdma32_port_ops,
28574 .cable_detect = rdc_pata_cable_detect,
28575 .set_piomode = rdc_set_piomode,
28576diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28577index a5e4dfe..080c8c9 100644
28578--- a/drivers/ata/pata_rz1000.c
28579+++ b/drivers/ata/pata_rz1000.c
28580@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28581 ATA_PIO_SHT(DRV_NAME),
28582 };
28583
28584-static struct ata_port_operations rz1000_port_ops = {
28585+static const struct ata_port_operations rz1000_port_ops = {
28586 .inherits = &ata_sff_port_ops,
28587 .cable_detect = ata_cable_40wire,
28588 .set_mode = rz1000_set_mode,
28589diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28590index 3bbed83..e309daf 100644
28591--- a/drivers/ata/pata_sc1200.c
28592+++ b/drivers/ata/pata_sc1200.c
28593@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28594 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28595 };
28596
28597-static struct ata_port_operations sc1200_port_ops = {
28598+static const struct ata_port_operations sc1200_port_ops = {
28599 .inherits = &ata_bmdma_port_ops,
28600 .qc_prep = ata_sff_dumb_qc_prep,
28601 .qc_issue = sc1200_qc_issue,
28602diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28603index 4257d6b..4c1d9d5 100644
28604--- a/drivers/ata/pata_scc.c
28605+++ b/drivers/ata/pata_scc.c
28606@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28607 ATA_BMDMA_SHT(DRV_NAME),
28608 };
28609
28610-static struct ata_port_operations scc_pata_ops = {
28611+static const struct ata_port_operations scc_pata_ops = {
28612 .inherits = &ata_bmdma_port_ops,
28613
28614 .set_piomode = scc_set_piomode,
28615diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28616index 99cceb4..e2e0a87 100644
28617--- a/drivers/ata/pata_sch.c
28618+++ b/drivers/ata/pata_sch.c
28619@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28620 ATA_BMDMA_SHT(DRV_NAME),
28621 };
28622
28623-static struct ata_port_operations sch_pata_ops = {
28624+static const struct ata_port_operations sch_pata_ops = {
28625 .inherits = &ata_bmdma_port_ops,
28626 .cable_detect = ata_cable_unknown,
28627 .set_piomode = sch_set_piomode,
28628diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28629index beaed12..39969f1 100644
28630--- a/drivers/ata/pata_serverworks.c
28631+++ b/drivers/ata/pata_serverworks.c
28632@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28633 ATA_BMDMA_SHT(DRV_NAME),
28634 };
28635
28636-static struct ata_port_operations serverworks_osb4_port_ops = {
28637+static const struct ata_port_operations serverworks_osb4_port_ops = {
28638 .inherits = &ata_bmdma_port_ops,
28639 .cable_detect = serverworks_cable_detect,
28640 .mode_filter = serverworks_osb4_filter,
28641@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28642 .set_dmamode = serverworks_set_dmamode,
28643 };
28644
28645-static struct ata_port_operations serverworks_csb_port_ops = {
28646+static const struct ata_port_operations serverworks_csb_port_ops = {
28647 .inherits = &serverworks_osb4_port_ops,
28648 .mode_filter = serverworks_csb_filter,
28649 };
28650diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28651index a2ace48..0463b44 100644
28652--- a/drivers/ata/pata_sil680.c
28653+++ b/drivers/ata/pata_sil680.c
28654@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28655 ATA_BMDMA_SHT(DRV_NAME),
28656 };
28657
28658-static struct ata_port_operations sil680_port_ops = {
28659+static const struct ata_port_operations sil680_port_ops = {
28660 .inherits = &ata_bmdma32_port_ops,
28661 .cable_detect = sil680_cable_detect,
28662 .set_piomode = sil680_set_piomode,
28663diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28664index 488e77b..b3724d5 100644
28665--- a/drivers/ata/pata_sis.c
28666+++ b/drivers/ata/pata_sis.c
28667@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28668 ATA_BMDMA_SHT(DRV_NAME),
28669 };
28670
28671-static struct ata_port_operations sis_133_for_sata_ops = {
28672+static const struct ata_port_operations sis_133_for_sata_ops = {
28673 .inherits = &ata_bmdma_port_ops,
28674 .set_piomode = sis_133_set_piomode,
28675 .set_dmamode = sis_133_set_dmamode,
28676 .cable_detect = sis_133_cable_detect,
28677 };
28678
28679-static struct ata_port_operations sis_base_ops = {
28680+static const struct ata_port_operations sis_base_ops = {
28681 .inherits = &ata_bmdma_port_ops,
28682 .prereset = sis_pre_reset,
28683 };
28684
28685-static struct ata_port_operations sis_133_ops = {
28686+static const struct ata_port_operations sis_133_ops = {
28687 .inherits = &sis_base_ops,
28688 .set_piomode = sis_133_set_piomode,
28689 .set_dmamode = sis_133_set_dmamode,
28690 .cable_detect = sis_133_cable_detect,
28691 };
28692
28693-static struct ata_port_operations sis_133_early_ops = {
28694+static const struct ata_port_operations sis_133_early_ops = {
28695 .inherits = &sis_base_ops,
28696 .set_piomode = sis_100_set_piomode,
28697 .set_dmamode = sis_133_early_set_dmamode,
28698 .cable_detect = sis_66_cable_detect,
28699 };
28700
28701-static struct ata_port_operations sis_100_ops = {
28702+static const struct ata_port_operations sis_100_ops = {
28703 .inherits = &sis_base_ops,
28704 .set_piomode = sis_100_set_piomode,
28705 .set_dmamode = sis_100_set_dmamode,
28706 .cable_detect = sis_66_cable_detect,
28707 };
28708
28709-static struct ata_port_operations sis_66_ops = {
28710+static const struct ata_port_operations sis_66_ops = {
28711 .inherits = &sis_base_ops,
28712 .set_piomode = sis_old_set_piomode,
28713 .set_dmamode = sis_66_set_dmamode,
28714 .cable_detect = sis_66_cable_detect,
28715 };
28716
28717-static struct ata_port_operations sis_old_ops = {
28718+static const struct ata_port_operations sis_old_ops = {
28719 .inherits = &sis_base_ops,
28720 .set_piomode = sis_old_set_piomode,
28721 .set_dmamode = sis_old_set_dmamode,
28722diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28723index 29f733c..43e9ca0 100644
28724--- a/drivers/ata/pata_sl82c105.c
28725+++ b/drivers/ata/pata_sl82c105.c
28726@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28727 ATA_BMDMA_SHT(DRV_NAME),
28728 };
28729
28730-static struct ata_port_operations sl82c105_port_ops = {
28731+static const struct ata_port_operations sl82c105_port_ops = {
28732 .inherits = &ata_bmdma_port_ops,
28733 .qc_defer = sl82c105_qc_defer,
28734 .bmdma_start = sl82c105_bmdma_start,
28735diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28736index f1f13ff..df39e99 100644
28737--- a/drivers/ata/pata_triflex.c
28738+++ b/drivers/ata/pata_triflex.c
28739@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28740 ATA_BMDMA_SHT(DRV_NAME),
28741 };
28742
28743-static struct ata_port_operations triflex_port_ops = {
28744+static const struct ata_port_operations triflex_port_ops = {
28745 .inherits = &ata_bmdma_port_ops,
28746 .bmdma_start = triflex_bmdma_start,
28747 .bmdma_stop = triflex_bmdma_stop,
28748diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28749index 1d73b8d..98a4b29 100644
28750--- a/drivers/ata/pata_via.c
28751+++ b/drivers/ata/pata_via.c
28752@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
28753 ATA_BMDMA_SHT(DRV_NAME),
28754 };
28755
28756-static struct ata_port_operations via_port_ops = {
28757+static const struct ata_port_operations via_port_ops = {
28758 .inherits = &ata_bmdma_port_ops,
28759 .cable_detect = via_cable_detect,
28760 .set_piomode = via_set_piomode,
28761@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
28762 .port_start = via_port_start,
28763 };
28764
28765-static struct ata_port_operations via_port_ops_noirq = {
28766+static const struct ata_port_operations via_port_ops_noirq = {
28767 .inherits = &via_port_ops,
28768 .sff_data_xfer = ata_sff_data_xfer_noirq,
28769 };
28770diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
28771index 6d8619b..ad511c4 100644
28772--- a/drivers/ata/pata_winbond.c
28773+++ b/drivers/ata/pata_winbond.c
28774@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
28775 ATA_PIO_SHT(DRV_NAME),
28776 };
28777
28778-static struct ata_port_operations winbond_port_ops = {
28779+static const struct ata_port_operations winbond_port_ops = {
28780 .inherits = &ata_sff_port_ops,
28781 .sff_data_xfer = winbond_data_xfer,
28782 .cable_detect = ata_cable_40wire,
28783diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
28784index 6c65b07..f996ec7 100644
28785--- a/drivers/ata/pdc_adma.c
28786+++ b/drivers/ata/pdc_adma.c
28787@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
28788 .dma_boundary = ADMA_DMA_BOUNDARY,
28789 };
28790
28791-static struct ata_port_operations adma_ata_ops = {
28792+static const struct ata_port_operations adma_ata_ops = {
28793 .inherits = &ata_sff_port_ops,
28794
28795 .lost_interrupt = ATA_OP_NULL,
28796diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
28797index 172b57e..c49bc1e 100644
28798--- a/drivers/ata/sata_fsl.c
28799+++ b/drivers/ata/sata_fsl.c
28800@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
28801 .dma_boundary = ATA_DMA_BOUNDARY,
28802 };
28803
28804-static struct ata_port_operations sata_fsl_ops = {
28805+static const struct ata_port_operations sata_fsl_ops = {
28806 .inherits = &sata_pmp_port_ops,
28807
28808 .qc_defer = ata_std_qc_defer,
28809diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
28810index 4406902..60603ef 100644
28811--- a/drivers/ata/sata_inic162x.c
28812+++ b/drivers/ata/sata_inic162x.c
28813@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
28814 return 0;
28815 }
28816
28817-static struct ata_port_operations inic_port_ops = {
28818+static const struct ata_port_operations inic_port_ops = {
28819 .inherits = &sata_port_ops,
28820
28821 .check_atapi_dma = inic_check_atapi_dma,
28822diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
28823index cf41126..8107be6 100644
28824--- a/drivers/ata/sata_mv.c
28825+++ b/drivers/ata/sata_mv.c
28826@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
28827 .dma_boundary = MV_DMA_BOUNDARY,
28828 };
28829
28830-static struct ata_port_operations mv5_ops = {
28831+static const struct ata_port_operations mv5_ops = {
28832 .inherits = &ata_sff_port_ops,
28833
28834 .lost_interrupt = ATA_OP_NULL,
28835@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
28836 .port_stop = mv_port_stop,
28837 };
28838
28839-static struct ata_port_operations mv6_ops = {
28840+static const struct ata_port_operations mv6_ops = {
28841 .inherits = &mv5_ops,
28842 .dev_config = mv6_dev_config,
28843 .scr_read = mv_scr_read,
28844@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
28845 .bmdma_status = mv_bmdma_status,
28846 };
28847
28848-static struct ata_port_operations mv_iie_ops = {
28849+static const struct ata_port_operations mv_iie_ops = {
28850 .inherits = &mv6_ops,
28851 .dev_config = ATA_OP_NULL,
28852 .qc_prep = mv_qc_prep_iie,
28853diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
28854index ae2297c..d5c9c33 100644
28855--- a/drivers/ata/sata_nv.c
28856+++ b/drivers/ata/sata_nv.c
28857@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
28858 * cases. Define nv_hardreset() which only kicks in for post-boot
28859 * probing and use it for all variants.
28860 */
28861-static struct ata_port_operations nv_generic_ops = {
28862+static const struct ata_port_operations nv_generic_ops = {
28863 .inherits = &ata_bmdma_port_ops,
28864 .lost_interrupt = ATA_OP_NULL,
28865 .scr_read = nv_scr_read,
28866@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
28867 .hardreset = nv_hardreset,
28868 };
28869
28870-static struct ata_port_operations nv_nf2_ops = {
28871+static const struct ata_port_operations nv_nf2_ops = {
28872 .inherits = &nv_generic_ops,
28873 .freeze = nv_nf2_freeze,
28874 .thaw = nv_nf2_thaw,
28875 };
28876
28877-static struct ata_port_operations nv_ck804_ops = {
28878+static const struct ata_port_operations nv_ck804_ops = {
28879 .inherits = &nv_generic_ops,
28880 .freeze = nv_ck804_freeze,
28881 .thaw = nv_ck804_thaw,
28882 .host_stop = nv_ck804_host_stop,
28883 };
28884
28885-static struct ata_port_operations nv_adma_ops = {
28886+static const struct ata_port_operations nv_adma_ops = {
28887 .inherits = &nv_ck804_ops,
28888
28889 .check_atapi_dma = nv_adma_check_atapi_dma,
28890@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
28891 .host_stop = nv_adma_host_stop,
28892 };
28893
28894-static struct ata_port_operations nv_swncq_ops = {
28895+static const struct ata_port_operations nv_swncq_ops = {
28896 .inherits = &nv_generic_ops,
28897
28898 .qc_defer = ata_std_qc_defer,
28899diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
28900index 07d8d00..6cc70bb 100644
28901--- a/drivers/ata/sata_promise.c
28902+++ b/drivers/ata/sata_promise.c
28903@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
28904 .error_handler = pdc_error_handler,
28905 };
28906
28907-static struct ata_port_operations pdc_sata_ops = {
28908+static const struct ata_port_operations pdc_sata_ops = {
28909 .inherits = &pdc_common_ops,
28910 .cable_detect = pdc_sata_cable_detect,
28911 .freeze = pdc_sata_freeze,
28912@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
28913
28914 /* First-generation chips need a more restrictive ->check_atapi_dma op,
28915 and ->freeze/thaw that ignore the hotplug controls. */
28916-static struct ata_port_operations pdc_old_sata_ops = {
28917+static const struct ata_port_operations pdc_old_sata_ops = {
28918 .inherits = &pdc_sata_ops,
28919 .freeze = pdc_freeze,
28920 .thaw = pdc_thaw,
28921 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
28922 };
28923
28924-static struct ata_port_operations pdc_pata_ops = {
28925+static const struct ata_port_operations pdc_pata_ops = {
28926 .inherits = &pdc_common_ops,
28927 .cable_detect = pdc_pata_cable_detect,
28928 .freeze = pdc_freeze,
28929diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
28930index 326c0cf..36ecebe 100644
28931--- a/drivers/ata/sata_qstor.c
28932+++ b/drivers/ata/sata_qstor.c
28933@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
28934 .dma_boundary = QS_DMA_BOUNDARY,
28935 };
28936
28937-static struct ata_port_operations qs_ata_ops = {
28938+static const struct ata_port_operations qs_ata_ops = {
28939 .inherits = &ata_sff_port_ops,
28940
28941 .check_atapi_dma = qs_check_atapi_dma,
28942diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
28943index 3cb69d5..0871d3c 100644
28944--- a/drivers/ata/sata_sil.c
28945+++ b/drivers/ata/sata_sil.c
28946@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
28947 .sg_tablesize = ATA_MAX_PRD
28948 };
28949
28950-static struct ata_port_operations sil_ops = {
28951+static const struct ata_port_operations sil_ops = {
28952 .inherits = &ata_bmdma32_port_ops,
28953 .dev_config = sil_dev_config,
28954 .set_mode = sil_set_mode,
28955diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
28956index e6946fc..eddb794 100644
28957--- a/drivers/ata/sata_sil24.c
28958+++ b/drivers/ata/sata_sil24.c
28959@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
28960 .dma_boundary = ATA_DMA_BOUNDARY,
28961 };
28962
28963-static struct ata_port_operations sil24_ops = {
28964+static const struct ata_port_operations sil24_ops = {
28965 .inherits = &sata_pmp_port_ops,
28966
28967 .qc_defer = sil24_qc_defer,
28968diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
28969index f8a91bf..9cb06b6 100644
28970--- a/drivers/ata/sata_sis.c
28971+++ b/drivers/ata/sata_sis.c
28972@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
28973 ATA_BMDMA_SHT(DRV_NAME),
28974 };
28975
28976-static struct ata_port_operations sis_ops = {
28977+static const struct ata_port_operations sis_ops = {
28978 .inherits = &ata_bmdma_port_ops,
28979 .scr_read = sis_scr_read,
28980 .scr_write = sis_scr_write,
28981diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
28982index 7257f2d..d04c6f5 100644
28983--- a/drivers/ata/sata_svw.c
28984+++ b/drivers/ata/sata_svw.c
28985@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
28986 };
28987
28988
28989-static struct ata_port_operations k2_sata_ops = {
28990+static const struct ata_port_operations k2_sata_ops = {
28991 .inherits = &ata_bmdma_port_ops,
28992 .sff_tf_load = k2_sata_tf_load,
28993 .sff_tf_read = k2_sata_tf_read,
28994diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
28995index bbcf970..cd0df0d 100644
28996--- a/drivers/ata/sata_sx4.c
28997+++ b/drivers/ata/sata_sx4.c
28998@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
28999 };
29000
29001 /* TODO: inherit from base port_ops after converting to new EH */
29002-static struct ata_port_operations pdc_20621_ops = {
29003+static const struct ata_port_operations pdc_20621_ops = {
29004 .inherits = &ata_sff_port_ops,
29005
29006 .check_atapi_dma = pdc_check_atapi_dma,
29007diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29008index e5bff47..089d859 100644
29009--- a/drivers/ata/sata_uli.c
29010+++ b/drivers/ata/sata_uli.c
29011@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29012 ATA_BMDMA_SHT(DRV_NAME),
29013 };
29014
29015-static struct ata_port_operations uli_ops = {
29016+static const struct ata_port_operations uli_ops = {
29017 .inherits = &ata_bmdma_port_ops,
29018 .scr_read = uli_scr_read,
29019 .scr_write = uli_scr_write,
29020diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29021index f5dcca7..77b94eb 100644
29022--- a/drivers/ata/sata_via.c
29023+++ b/drivers/ata/sata_via.c
29024@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29025 ATA_BMDMA_SHT(DRV_NAME),
29026 };
29027
29028-static struct ata_port_operations svia_base_ops = {
29029+static const struct ata_port_operations svia_base_ops = {
29030 .inherits = &ata_bmdma_port_ops,
29031 .sff_tf_load = svia_tf_load,
29032 };
29033
29034-static struct ata_port_operations vt6420_sata_ops = {
29035+static const struct ata_port_operations vt6420_sata_ops = {
29036 .inherits = &svia_base_ops,
29037 .freeze = svia_noop_freeze,
29038 .prereset = vt6420_prereset,
29039 .bmdma_start = vt6420_bmdma_start,
29040 };
29041
29042-static struct ata_port_operations vt6421_pata_ops = {
29043+static const struct ata_port_operations vt6421_pata_ops = {
29044 .inherits = &svia_base_ops,
29045 .cable_detect = vt6421_pata_cable_detect,
29046 .set_piomode = vt6421_set_pio_mode,
29047 .set_dmamode = vt6421_set_dma_mode,
29048 };
29049
29050-static struct ata_port_operations vt6421_sata_ops = {
29051+static const struct ata_port_operations vt6421_sata_ops = {
29052 .inherits = &svia_base_ops,
29053 .scr_read = svia_scr_read,
29054 .scr_write = svia_scr_write,
29055 };
29056
29057-static struct ata_port_operations vt8251_ops = {
29058+static const struct ata_port_operations vt8251_ops = {
29059 .inherits = &svia_base_ops,
29060 .hardreset = sata_std_hardreset,
29061 .scr_read = vt8251_scr_read,
29062diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29063index 8b2a278..51e65d3 100644
29064--- a/drivers/ata/sata_vsc.c
29065+++ b/drivers/ata/sata_vsc.c
29066@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29067 };
29068
29069
29070-static struct ata_port_operations vsc_sata_ops = {
29071+static const struct ata_port_operations vsc_sata_ops = {
29072 .inherits = &ata_bmdma_port_ops,
29073 /* The IRQ handling is not quite standard SFF behaviour so we
29074 cannot use the default lost interrupt handler */
29075diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29076index 5effec6..7e4019a 100644
29077--- a/drivers/atm/adummy.c
29078+++ b/drivers/atm/adummy.c
29079@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29080 vcc->pop(vcc, skb);
29081 else
29082 dev_kfree_skb_any(skb);
29083- atomic_inc(&vcc->stats->tx);
29084+ atomic_inc_unchecked(&vcc->stats->tx);
29085
29086 return 0;
29087 }
29088diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29089index 66e1813..26a27c6 100644
29090--- a/drivers/atm/ambassador.c
29091+++ b/drivers/atm/ambassador.c
29092@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29093 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29094
29095 // VC layer stats
29096- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29097+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29098
29099 // free the descriptor
29100 kfree (tx_descr);
29101@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29102 dump_skb ("<<<", vc, skb);
29103
29104 // VC layer stats
29105- atomic_inc(&atm_vcc->stats->rx);
29106+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29107 __net_timestamp(skb);
29108 // end of our responsability
29109 atm_vcc->push (atm_vcc, skb);
29110@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29111 } else {
29112 PRINTK (KERN_INFO, "dropped over-size frame");
29113 // should we count this?
29114- atomic_inc(&atm_vcc->stats->rx_drop);
29115+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29116 }
29117
29118 } else {
29119@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29120 }
29121
29122 if (check_area (skb->data, skb->len)) {
29123- atomic_inc(&atm_vcc->stats->tx_err);
29124+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29125 return -ENOMEM; // ?
29126 }
29127
29128diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29129index 02ad83d..6daffeb 100644
29130--- a/drivers/atm/atmtcp.c
29131+++ b/drivers/atm/atmtcp.c
29132@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29133 if (vcc->pop) vcc->pop(vcc,skb);
29134 else dev_kfree_skb(skb);
29135 if (dev_data) return 0;
29136- atomic_inc(&vcc->stats->tx_err);
29137+ atomic_inc_unchecked(&vcc->stats->tx_err);
29138 return -ENOLINK;
29139 }
29140 size = skb->len+sizeof(struct atmtcp_hdr);
29141@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29142 if (!new_skb) {
29143 if (vcc->pop) vcc->pop(vcc,skb);
29144 else dev_kfree_skb(skb);
29145- atomic_inc(&vcc->stats->tx_err);
29146+ atomic_inc_unchecked(&vcc->stats->tx_err);
29147 return -ENOBUFS;
29148 }
29149 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29150@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29151 if (vcc->pop) vcc->pop(vcc,skb);
29152 else dev_kfree_skb(skb);
29153 out_vcc->push(out_vcc,new_skb);
29154- atomic_inc(&vcc->stats->tx);
29155- atomic_inc(&out_vcc->stats->rx);
29156+ atomic_inc_unchecked(&vcc->stats->tx);
29157+ atomic_inc_unchecked(&out_vcc->stats->rx);
29158 return 0;
29159 }
29160
29161@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29162 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29163 read_unlock(&vcc_sklist_lock);
29164 if (!out_vcc) {
29165- atomic_inc(&vcc->stats->tx_err);
29166+ atomic_inc_unchecked(&vcc->stats->tx_err);
29167 goto done;
29168 }
29169 skb_pull(skb,sizeof(struct atmtcp_hdr));
29170@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29171 __net_timestamp(new_skb);
29172 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29173 out_vcc->push(out_vcc,new_skb);
29174- atomic_inc(&vcc->stats->tx);
29175- atomic_inc(&out_vcc->stats->rx);
29176+ atomic_inc_unchecked(&vcc->stats->tx);
29177+ atomic_inc_unchecked(&out_vcc->stats->rx);
29178 done:
29179 if (vcc->pop) vcc->pop(vcc,skb);
29180 else dev_kfree_skb(skb);
29181diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29182index 0c30261..3da356e 100644
29183--- a/drivers/atm/eni.c
29184+++ b/drivers/atm/eni.c
29185@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29186 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29187 vcc->dev->number);
29188 length = 0;
29189- atomic_inc(&vcc->stats->rx_err);
29190+ atomic_inc_unchecked(&vcc->stats->rx_err);
29191 }
29192 else {
29193 length = ATM_CELL_SIZE-1; /* no HEC */
29194@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29195 size);
29196 }
29197 eff = length = 0;
29198- atomic_inc(&vcc->stats->rx_err);
29199+ atomic_inc_unchecked(&vcc->stats->rx_err);
29200 }
29201 else {
29202 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29203@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29204 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29205 vcc->dev->number,vcc->vci,length,size << 2,descr);
29206 length = eff = 0;
29207- atomic_inc(&vcc->stats->rx_err);
29208+ atomic_inc_unchecked(&vcc->stats->rx_err);
29209 }
29210 }
29211 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29212@@ -770,7 +770,7 @@ rx_dequeued++;
29213 vcc->push(vcc,skb);
29214 pushed++;
29215 }
29216- atomic_inc(&vcc->stats->rx);
29217+ atomic_inc_unchecked(&vcc->stats->rx);
29218 }
29219 wake_up(&eni_dev->rx_wait);
29220 }
29221@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29222 PCI_DMA_TODEVICE);
29223 if (vcc->pop) vcc->pop(vcc,skb);
29224 else dev_kfree_skb_irq(skb);
29225- atomic_inc(&vcc->stats->tx);
29226+ atomic_inc_unchecked(&vcc->stats->tx);
29227 wake_up(&eni_dev->tx_wait);
29228 dma_complete++;
29229 }
29230@@ -1570,7 +1570,7 @@ tx_complete++;
29231 /*--------------------------------- entries ---------------------------------*/
29232
29233
29234-static const char *media_name[] __devinitdata = {
29235+static const char *media_name[] __devinitconst = {
29236 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29237 "UTP", "05?", "06?", "07?", /* 4- 7 */
29238 "TAXI","09?", "10?", "11?", /* 8-11 */
29239diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29240index cd5049a..a51209f 100644
29241--- a/drivers/atm/firestream.c
29242+++ b/drivers/atm/firestream.c
29243@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29244 }
29245 }
29246
29247- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29248+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29249
29250 fs_dprintk (FS_DEBUG_TXMEM, "i");
29251 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29252@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29253 #endif
29254 skb_put (skb, qe->p1 & 0xffff);
29255 ATM_SKB(skb)->vcc = atm_vcc;
29256- atomic_inc(&atm_vcc->stats->rx);
29257+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29258 __net_timestamp(skb);
29259 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29260 atm_vcc->push (atm_vcc, skb);
29261@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29262 kfree (pe);
29263 }
29264 if (atm_vcc)
29265- atomic_inc(&atm_vcc->stats->rx_drop);
29266+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29267 break;
29268 case 0x1f: /* Reassembly abort: no buffers. */
29269 /* Silently increment error counter. */
29270 if (atm_vcc)
29271- atomic_inc(&atm_vcc->stats->rx_drop);
29272+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29273 break;
29274 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29275 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29276diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29277index f766cc4..a34002e 100644
29278--- a/drivers/atm/fore200e.c
29279+++ b/drivers/atm/fore200e.c
29280@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29281 #endif
29282 /* check error condition */
29283 if (*entry->status & STATUS_ERROR)
29284- atomic_inc(&vcc->stats->tx_err);
29285+ atomic_inc_unchecked(&vcc->stats->tx_err);
29286 else
29287- atomic_inc(&vcc->stats->tx);
29288+ atomic_inc_unchecked(&vcc->stats->tx);
29289 }
29290 }
29291
29292@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29293 if (skb == NULL) {
29294 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29295
29296- atomic_inc(&vcc->stats->rx_drop);
29297+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29298 return -ENOMEM;
29299 }
29300
29301@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29302
29303 dev_kfree_skb_any(skb);
29304
29305- atomic_inc(&vcc->stats->rx_drop);
29306+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29307 return -ENOMEM;
29308 }
29309
29310 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29311
29312 vcc->push(vcc, skb);
29313- atomic_inc(&vcc->stats->rx);
29314+ atomic_inc_unchecked(&vcc->stats->rx);
29315
29316 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29317
29318@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29319 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29320 fore200e->atm_dev->number,
29321 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29322- atomic_inc(&vcc->stats->rx_err);
29323+ atomic_inc_unchecked(&vcc->stats->rx_err);
29324 }
29325 }
29326
29327@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29328 goto retry_here;
29329 }
29330
29331- atomic_inc(&vcc->stats->tx_err);
29332+ atomic_inc_unchecked(&vcc->stats->tx_err);
29333
29334 fore200e->tx_sat++;
29335 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29336diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29337index 7066703..2b130de 100644
29338--- a/drivers/atm/he.c
29339+++ b/drivers/atm/he.c
29340@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29341
29342 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29343 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29344- atomic_inc(&vcc->stats->rx_drop);
29345+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29346 goto return_host_buffers;
29347 }
29348
29349@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29350 RBRQ_LEN_ERR(he_dev->rbrq_head)
29351 ? "LEN_ERR" : "",
29352 vcc->vpi, vcc->vci);
29353- atomic_inc(&vcc->stats->rx_err);
29354+ atomic_inc_unchecked(&vcc->stats->rx_err);
29355 goto return_host_buffers;
29356 }
29357
29358@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29359 vcc->push(vcc, skb);
29360 spin_lock(&he_dev->global_lock);
29361
29362- atomic_inc(&vcc->stats->rx);
29363+ atomic_inc_unchecked(&vcc->stats->rx);
29364
29365 return_host_buffers:
29366 ++pdus_assembled;
29367@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29368 tpd->vcc->pop(tpd->vcc, tpd->skb);
29369 else
29370 dev_kfree_skb_any(tpd->skb);
29371- atomic_inc(&tpd->vcc->stats->tx_err);
29372+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29373 }
29374 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29375 return;
29376@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29377 vcc->pop(vcc, skb);
29378 else
29379 dev_kfree_skb_any(skb);
29380- atomic_inc(&vcc->stats->tx_err);
29381+ atomic_inc_unchecked(&vcc->stats->tx_err);
29382 return -EINVAL;
29383 }
29384
29385@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29386 vcc->pop(vcc, skb);
29387 else
29388 dev_kfree_skb_any(skb);
29389- atomic_inc(&vcc->stats->tx_err);
29390+ atomic_inc_unchecked(&vcc->stats->tx_err);
29391 return -EINVAL;
29392 }
29393 #endif
29394@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29395 vcc->pop(vcc, skb);
29396 else
29397 dev_kfree_skb_any(skb);
29398- atomic_inc(&vcc->stats->tx_err);
29399+ atomic_inc_unchecked(&vcc->stats->tx_err);
29400 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29401 return -ENOMEM;
29402 }
29403@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29404 vcc->pop(vcc, skb);
29405 else
29406 dev_kfree_skb_any(skb);
29407- atomic_inc(&vcc->stats->tx_err);
29408+ atomic_inc_unchecked(&vcc->stats->tx_err);
29409 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29410 return -ENOMEM;
29411 }
29412@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29413 __enqueue_tpd(he_dev, tpd, cid);
29414 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29415
29416- atomic_inc(&vcc->stats->tx);
29417+ atomic_inc_unchecked(&vcc->stats->tx);
29418
29419 return 0;
29420 }
29421diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29422index 4e49021..01b1512 100644
29423--- a/drivers/atm/horizon.c
29424+++ b/drivers/atm/horizon.c
29425@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29426 {
29427 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29428 // VC layer stats
29429- atomic_inc(&vcc->stats->rx);
29430+ atomic_inc_unchecked(&vcc->stats->rx);
29431 __net_timestamp(skb);
29432 // end of our responsability
29433 vcc->push (vcc, skb);
29434@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29435 dev->tx_iovec = NULL;
29436
29437 // VC layer stats
29438- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29439+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29440
29441 // free the skb
29442 hrz_kfree_skb (skb);
29443diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29444index e33ae00..9deb4ab 100644
29445--- a/drivers/atm/idt77252.c
29446+++ b/drivers/atm/idt77252.c
29447@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29448 else
29449 dev_kfree_skb(skb);
29450
29451- atomic_inc(&vcc->stats->tx);
29452+ atomic_inc_unchecked(&vcc->stats->tx);
29453 }
29454
29455 atomic_dec(&scq->used);
29456@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29457 if ((sb = dev_alloc_skb(64)) == NULL) {
29458 printk("%s: Can't allocate buffers for aal0.\n",
29459 card->name);
29460- atomic_add(i, &vcc->stats->rx_drop);
29461+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29462 break;
29463 }
29464 if (!atm_charge(vcc, sb->truesize)) {
29465 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29466 card->name);
29467- atomic_add(i - 1, &vcc->stats->rx_drop);
29468+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29469 dev_kfree_skb(sb);
29470 break;
29471 }
29472@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29473 ATM_SKB(sb)->vcc = vcc;
29474 __net_timestamp(sb);
29475 vcc->push(vcc, sb);
29476- atomic_inc(&vcc->stats->rx);
29477+ atomic_inc_unchecked(&vcc->stats->rx);
29478
29479 cell += ATM_CELL_PAYLOAD;
29480 }
29481@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29482 "(CDC: %08x)\n",
29483 card->name, len, rpp->len, readl(SAR_REG_CDC));
29484 recycle_rx_pool_skb(card, rpp);
29485- atomic_inc(&vcc->stats->rx_err);
29486+ atomic_inc_unchecked(&vcc->stats->rx_err);
29487 return;
29488 }
29489 if (stat & SAR_RSQE_CRC) {
29490 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29491 recycle_rx_pool_skb(card, rpp);
29492- atomic_inc(&vcc->stats->rx_err);
29493+ atomic_inc_unchecked(&vcc->stats->rx_err);
29494 return;
29495 }
29496 if (skb_queue_len(&rpp->queue) > 1) {
29497@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29498 RXPRINTK("%s: Can't alloc RX skb.\n",
29499 card->name);
29500 recycle_rx_pool_skb(card, rpp);
29501- atomic_inc(&vcc->stats->rx_err);
29502+ atomic_inc_unchecked(&vcc->stats->rx_err);
29503 return;
29504 }
29505 if (!atm_charge(vcc, skb->truesize)) {
29506@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29507 __net_timestamp(skb);
29508
29509 vcc->push(vcc, skb);
29510- atomic_inc(&vcc->stats->rx);
29511+ atomic_inc_unchecked(&vcc->stats->rx);
29512
29513 return;
29514 }
29515@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29516 __net_timestamp(skb);
29517
29518 vcc->push(vcc, skb);
29519- atomic_inc(&vcc->stats->rx);
29520+ atomic_inc_unchecked(&vcc->stats->rx);
29521
29522 if (skb->truesize > SAR_FB_SIZE_3)
29523 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29524@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29525 if (vcc->qos.aal != ATM_AAL0) {
29526 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29527 card->name, vpi, vci);
29528- atomic_inc(&vcc->stats->rx_drop);
29529+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29530 goto drop;
29531 }
29532
29533 if ((sb = dev_alloc_skb(64)) == NULL) {
29534 printk("%s: Can't allocate buffers for AAL0.\n",
29535 card->name);
29536- atomic_inc(&vcc->stats->rx_err);
29537+ atomic_inc_unchecked(&vcc->stats->rx_err);
29538 goto drop;
29539 }
29540
29541@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29542 ATM_SKB(sb)->vcc = vcc;
29543 __net_timestamp(sb);
29544 vcc->push(vcc, sb);
29545- atomic_inc(&vcc->stats->rx);
29546+ atomic_inc_unchecked(&vcc->stats->rx);
29547
29548 drop:
29549 skb_pull(queue, 64);
29550@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29551
29552 if (vc == NULL) {
29553 printk("%s: NULL connection in send().\n", card->name);
29554- atomic_inc(&vcc->stats->tx_err);
29555+ atomic_inc_unchecked(&vcc->stats->tx_err);
29556 dev_kfree_skb(skb);
29557 return -EINVAL;
29558 }
29559 if (!test_bit(VCF_TX, &vc->flags)) {
29560 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29561- atomic_inc(&vcc->stats->tx_err);
29562+ atomic_inc_unchecked(&vcc->stats->tx_err);
29563 dev_kfree_skb(skb);
29564 return -EINVAL;
29565 }
29566@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29567 break;
29568 default:
29569 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29570- atomic_inc(&vcc->stats->tx_err);
29571+ atomic_inc_unchecked(&vcc->stats->tx_err);
29572 dev_kfree_skb(skb);
29573 return -EINVAL;
29574 }
29575
29576 if (skb_shinfo(skb)->nr_frags != 0) {
29577 printk("%s: No scatter-gather yet.\n", card->name);
29578- atomic_inc(&vcc->stats->tx_err);
29579+ atomic_inc_unchecked(&vcc->stats->tx_err);
29580 dev_kfree_skb(skb);
29581 return -EINVAL;
29582 }
29583@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29584
29585 err = queue_skb(card, vc, skb, oam);
29586 if (err) {
29587- atomic_inc(&vcc->stats->tx_err);
29588+ atomic_inc_unchecked(&vcc->stats->tx_err);
29589 dev_kfree_skb(skb);
29590 return err;
29591 }
29592@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29593 skb = dev_alloc_skb(64);
29594 if (!skb) {
29595 printk("%s: Out of memory in send_oam().\n", card->name);
29596- atomic_inc(&vcc->stats->tx_err);
29597+ atomic_inc_unchecked(&vcc->stats->tx_err);
29598 return -ENOMEM;
29599 }
29600 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29601diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29602index b2c1b37..faa672b 100644
29603--- a/drivers/atm/iphase.c
29604+++ b/drivers/atm/iphase.c
29605@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29606 status = (u_short) (buf_desc_ptr->desc_mode);
29607 if (status & (RX_CER | RX_PTE | RX_OFL))
29608 {
29609- atomic_inc(&vcc->stats->rx_err);
29610+ atomic_inc_unchecked(&vcc->stats->rx_err);
29611 IF_ERR(printk("IA: bad packet, dropping it");)
29612 if (status & RX_CER) {
29613 IF_ERR(printk(" cause: packet CRC error\n");)
29614@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29615 len = dma_addr - buf_addr;
29616 if (len > iadev->rx_buf_sz) {
29617 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29618- atomic_inc(&vcc->stats->rx_err);
29619+ atomic_inc_unchecked(&vcc->stats->rx_err);
29620 goto out_free_desc;
29621 }
29622
29623@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29624 ia_vcc = INPH_IA_VCC(vcc);
29625 if (ia_vcc == NULL)
29626 {
29627- atomic_inc(&vcc->stats->rx_err);
29628+ atomic_inc_unchecked(&vcc->stats->rx_err);
29629 dev_kfree_skb_any(skb);
29630 atm_return(vcc, atm_guess_pdu2truesize(len));
29631 goto INCR_DLE;
29632@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29633 if ((length > iadev->rx_buf_sz) || (length >
29634 (skb->len - sizeof(struct cpcs_trailer))))
29635 {
29636- atomic_inc(&vcc->stats->rx_err);
29637+ atomic_inc_unchecked(&vcc->stats->rx_err);
29638 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29639 length, skb->len);)
29640 dev_kfree_skb_any(skb);
29641@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29642
29643 IF_RX(printk("rx_dle_intr: skb push");)
29644 vcc->push(vcc,skb);
29645- atomic_inc(&vcc->stats->rx);
29646+ atomic_inc_unchecked(&vcc->stats->rx);
29647 iadev->rx_pkt_cnt++;
29648 }
29649 INCR_DLE:
29650@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29651 {
29652 struct k_sonet_stats *stats;
29653 stats = &PRIV(_ia_dev[board])->sonet_stats;
29654- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29655- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29656- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29657- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29658- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29659- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29660- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29661- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29662- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29663+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29664+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29665+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29666+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29667+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29668+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29669+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29670+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29671+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29672 }
29673 ia_cmds.status = 0;
29674 break;
29675@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29676 if ((desc == 0) || (desc > iadev->num_tx_desc))
29677 {
29678 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29679- atomic_inc(&vcc->stats->tx);
29680+ atomic_inc_unchecked(&vcc->stats->tx);
29681 if (vcc->pop)
29682 vcc->pop(vcc, skb);
29683 else
29684@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29685 ATM_DESC(skb) = vcc->vci;
29686 skb_queue_tail(&iadev->tx_dma_q, skb);
29687
29688- atomic_inc(&vcc->stats->tx);
29689+ atomic_inc_unchecked(&vcc->stats->tx);
29690 iadev->tx_pkt_cnt++;
29691 /* Increment transaction counter */
29692 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29693
29694 #if 0
29695 /* add flow control logic */
29696- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29697+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29698 if (iavcc->vc_desc_cnt > 10) {
29699 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29700 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29701diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29702index cf97c34..8d30655 100644
29703--- a/drivers/atm/lanai.c
29704+++ b/drivers/atm/lanai.c
29705@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29706 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29707 lanai_endtx(lanai, lvcc);
29708 lanai_free_skb(lvcc->tx.atmvcc, skb);
29709- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29710+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29711 }
29712
29713 /* Try to fill the buffer - don't call unless there is backlog */
29714@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29715 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29716 __net_timestamp(skb);
29717 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29718- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29719+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29720 out:
29721 lvcc->rx.buf.ptr = end;
29722 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29723@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29724 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29725 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29726 lanai->stats.service_rxnotaal5++;
29727- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29728+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29729 return 0;
29730 }
29731 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29732@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29733 int bytes;
29734 read_unlock(&vcc_sklist_lock);
29735 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29736- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29737+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29738 lvcc->stats.x.aal5.service_trash++;
29739 bytes = (SERVICE_GET_END(s) * 16) -
29740 (((unsigned long) lvcc->rx.buf.ptr) -
29741@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29742 }
29743 if (s & SERVICE_STREAM) {
29744 read_unlock(&vcc_sklist_lock);
29745- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29746+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29747 lvcc->stats.x.aal5.service_stream++;
29748 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29749 "PDU on VCI %d!\n", lanai->number, vci);
29750@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29751 return 0;
29752 }
29753 DPRINTK("got rx crc error on vci %d\n", vci);
29754- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29755+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29756 lvcc->stats.x.aal5.service_rxcrc++;
29757 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29758 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29759diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29760index 3da804b..d3b0eed 100644
29761--- a/drivers/atm/nicstar.c
29762+++ b/drivers/atm/nicstar.c
29763@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29764 if ((vc = (vc_map *) vcc->dev_data) == NULL)
29765 {
29766 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
29767- atomic_inc(&vcc->stats->tx_err);
29768+ atomic_inc_unchecked(&vcc->stats->tx_err);
29769 dev_kfree_skb_any(skb);
29770 return -EINVAL;
29771 }
29772@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29773 if (!vc->tx)
29774 {
29775 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
29776- atomic_inc(&vcc->stats->tx_err);
29777+ atomic_inc_unchecked(&vcc->stats->tx_err);
29778 dev_kfree_skb_any(skb);
29779 return -EINVAL;
29780 }
29781@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29782 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
29783 {
29784 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
29785- atomic_inc(&vcc->stats->tx_err);
29786+ atomic_inc_unchecked(&vcc->stats->tx_err);
29787 dev_kfree_skb_any(skb);
29788 return -EINVAL;
29789 }
29790@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29791 if (skb_shinfo(skb)->nr_frags != 0)
29792 {
29793 printk("nicstar%d: No scatter-gather yet.\n", card->index);
29794- atomic_inc(&vcc->stats->tx_err);
29795+ atomic_inc_unchecked(&vcc->stats->tx_err);
29796 dev_kfree_skb_any(skb);
29797 return -EINVAL;
29798 }
29799@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29800
29801 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
29802 {
29803- atomic_inc(&vcc->stats->tx_err);
29804+ atomic_inc_unchecked(&vcc->stats->tx_err);
29805 dev_kfree_skb_any(skb);
29806 return -EIO;
29807 }
29808- atomic_inc(&vcc->stats->tx);
29809+ atomic_inc_unchecked(&vcc->stats->tx);
29810
29811 return 0;
29812 }
29813@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29814 {
29815 printk("nicstar%d: Can't allocate buffers for aal0.\n",
29816 card->index);
29817- atomic_add(i,&vcc->stats->rx_drop);
29818+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
29819 break;
29820 }
29821 if (!atm_charge(vcc, sb->truesize))
29822 {
29823 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
29824 card->index);
29825- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29826+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29827 dev_kfree_skb_any(sb);
29828 break;
29829 }
29830@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29831 ATM_SKB(sb)->vcc = vcc;
29832 __net_timestamp(sb);
29833 vcc->push(vcc, sb);
29834- atomic_inc(&vcc->stats->rx);
29835+ atomic_inc_unchecked(&vcc->stats->rx);
29836 cell += ATM_CELL_PAYLOAD;
29837 }
29838
29839@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29840 if (iovb == NULL)
29841 {
29842 printk("nicstar%d: Out of iovec buffers.\n", card->index);
29843- atomic_inc(&vcc->stats->rx_drop);
29844+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29845 recycle_rx_buf(card, skb);
29846 return;
29847 }
29848@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29849 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
29850 {
29851 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
29852- atomic_inc(&vcc->stats->rx_err);
29853+ atomic_inc_unchecked(&vcc->stats->rx_err);
29854 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
29855 NS_SKB(iovb)->iovcnt = 0;
29856 iovb->len = 0;
29857@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29858 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
29859 card->index);
29860 which_list(card, skb);
29861- atomic_inc(&vcc->stats->rx_err);
29862+ atomic_inc_unchecked(&vcc->stats->rx_err);
29863 recycle_rx_buf(card, skb);
29864 vc->rx_iov = NULL;
29865 recycle_iov_buf(card, iovb);
29866@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29867 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
29868 card->index);
29869 which_list(card, skb);
29870- atomic_inc(&vcc->stats->rx_err);
29871+ atomic_inc_unchecked(&vcc->stats->rx_err);
29872 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29873 NS_SKB(iovb)->iovcnt);
29874 vc->rx_iov = NULL;
29875@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29876 printk(" - PDU size mismatch.\n");
29877 else
29878 printk(".\n");
29879- atomic_inc(&vcc->stats->rx_err);
29880+ atomic_inc_unchecked(&vcc->stats->rx_err);
29881 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29882 NS_SKB(iovb)->iovcnt);
29883 vc->rx_iov = NULL;
29884@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29885 if (!atm_charge(vcc, skb->truesize))
29886 {
29887 push_rxbufs(card, skb);
29888- atomic_inc(&vcc->stats->rx_drop);
29889+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29890 }
29891 else
29892 {
29893@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29894 ATM_SKB(skb)->vcc = vcc;
29895 __net_timestamp(skb);
29896 vcc->push(vcc, skb);
29897- atomic_inc(&vcc->stats->rx);
29898+ atomic_inc_unchecked(&vcc->stats->rx);
29899 }
29900 }
29901 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
29902@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29903 if (!atm_charge(vcc, sb->truesize))
29904 {
29905 push_rxbufs(card, sb);
29906- atomic_inc(&vcc->stats->rx_drop);
29907+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29908 }
29909 else
29910 {
29911@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29912 ATM_SKB(sb)->vcc = vcc;
29913 __net_timestamp(sb);
29914 vcc->push(vcc, sb);
29915- atomic_inc(&vcc->stats->rx);
29916+ atomic_inc_unchecked(&vcc->stats->rx);
29917 }
29918
29919 push_rxbufs(card, skb);
29920@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29921 if (!atm_charge(vcc, skb->truesize))
29922 {
29923 push_rxbufs(card, skb);
29924- atomic_inc(&vcc->stats->rx_drop);
29925+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29926 }
29927 else
29928 {
29929@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29930 ATM_SKB(skb)->vcc = vcc;
29931 __net_timestamp(skb);
29932 vcc->push(vcc, skb);
29933- atomic_inc(&vcc->stats->rx);
29934+ atomic_inc_unchecked(&vcc->stats->rx);
29935 }
29936
29937 push_rxbufs(card, sb);
29938@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29939 if (hb == NULL)
29940 {
29941 printk("nicstar%d: Out of huge buffers.\n", card->index);
29942- atomic_inc(&vcc->stats->rx_drop);
29943+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29944 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29945 NS_SKB(iovb)->iovcnt);
29946 vc->rx_iov = NULL;
29947@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29948 }
29949 else
29950 dev_kfree_skb_any(hb);
29951- atomic_inc(&vcc->stats->rx_drop);
29952+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29953 }
29954 else
29955 {
29956@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29957 #endif /* NS_USE_DESTRUCTORS */
29958 __net_timestamp(hb);
29959 vcc->push(vcc, hb);
29960- atomic_inc(&vcc->stats->rx);
29961+ atomic_inc_unchecked(&vcc->stats->rx);
29962 }
29963 }
29964
29965diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
29966index 84c93ff..e6ed269 100644
29967--- a/drivers/atm/solos-pci.c
29968+++ b/drivers/atm/solos-pci.c
29969@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
29970 }
29971 atm_charge(vcc, skb->truesize);
29972 vcc->push(vcc, skb);
29973- atomic_inc(&vcc->stats->rx);
29974+ atomic_inc_unchecked(&vcc->stats->rx);
29975 break;
29976
29977 case PKT_STATUS:
29978@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
29979 char msg[500];
29980 char item[10];
29981
29982+ pax_track_stack();
29983+
29984 len = buf->len;
29985 for (i = 0; i < len; i++){
29986 if(i % 8 == 0)
29987@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
29988 vcc = SKB_CB(oldskb)->vcc;
29989
29990 if (vcc) {
29991- atomic_inc(&vcc->stats->tx);
29992+ atomic_inc_unchecked(&vcc->stats->tx);
29993 solos_pop(vcc, oldskb);
29994 } else
29995 dev_kfree_skb_irq(oldskb);
29996diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
29997index 6dd3f59..ee377f3 100644
29998--- a/drivers/atm/suni.c
29999+++ b/drivers/atm/suni.c
30000@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30001
30002
30003 #define ADD_LIMITED(s,v) \
30004- atomic_add((v),&stats->s); \
30005- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30006+ atomic_add_unchecked((v),&stats->s); \
30007+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30008
30009
30010 static void suni_hz(unsigned long from_timer)
30011diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30012index fc8cb07..4a80e53 100644
30013--- a/drivers/atm/uPD98402.c
30014+++ b/drivers/atm/uPD98402.c
30015@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30016 struct sonet_stats tmp;
30017 int error = 0;
30018
30019- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30020+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30021 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30022 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30023 if (zero && !error) {
30024@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30025
30026
30027 #define ADD_LIMITED(s,v) \
30028- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30029- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30030- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30031+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30032+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30033+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30034
30035
30036 static void stat_event(struct atm_dev *dev)
30037@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30038 if (reason & uPD98402_INT_PFM) stat_event(dev);
30039 if (reason & uPD98402_INT_PCO) {
30040 (void) GET(PCOCR); /* clear interrupt cause */
30041- atomic_add(GET(HECCT),
30042+ atomic_add_unchecked(GET(HECCT),
30043 &PRIV(dev)->sonet_stats.uncorr_hcs);
30044 }
30045 if ((reason & uPD98402_INT_RFO) &&
30046@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30047 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30048 uPD98402_INT_LOS),PIMR); /* enable them */
30049 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30050- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30051- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30052- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30053+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30054+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30055+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30056 return 0;
30057 }
30058
30059diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30060index 2e9635b..32927b4 100644
30061--- a/drivers/atm/zatm.c
30062+++ b/drivers/atm/zatm.c
30063@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30064 }
30065 if (!size) {
30066 dev_kfree_skb_irq(skb);
30067- if (vcc) atomic_inc(&vcc->stats->rx_err);
30068+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30069 continue;
30070 }
30071 if (!atm_charge(vcc,skb->truesize)) {
30072@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30073 skb->len = size;
30074 ATM_SKB(skb)->vcc = vcc;
30075 vcc->push(vcc,skb);
30076- atomic_inc(&vcc->stats->rx);
30077+ atomic_inc_unchecked(&vcc->stats->rx);
30078 }
30079 zout(pos & 0xffff,MTA(mbx));
30080 #if 0 /* probably a stupid idea */
30081@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30082 skb_queue_head(&zatm_vcc->backlog,skb);
30083 break;
30084 }
30085- atomic_inc(&vcc->stats->tx);
30086+ atomic_inc_unchecked(&vcc->stats->tx);
30087 wake_up(&zatm_vcc->tx_wait);
30088 }
30089
30090diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30091index 63c143e..fece183 100644
30092--- a/drivers/base/bus.c
30093+++ b/drivers/base/bus.c
30094@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30095 return ret;
30096 }
30097
30098-static struct sysfs_ops driver_sysfs_ops = {
30099+static const struct sysfs_ops driver_sysfs_ops = {
30100 .show = drv_attr_show,
30101 .store = drv_attr_store,
30102 };
30103@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30104 return ret;
30105 }
30106
30107-static struct sysfs_ops bus_sysfs_ops = {
30108+static const struct sysfs_ops bus_sysfs_ops = {
30109 .show = bus_attr_show,
30110 .store = bus_attr_store,
30111 };
30112@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30113 return 0;
30114 }
30115
30116-static struct kset_uevent_ops bus_uevent_ops = {
30117+static const struct kset_uevent_ops bus_uevent_ops = {
30118 .filter = bus_uevent_filter,
30119 };
30120
30121diff --git a/drivers/base/class.c b/drivers/base/class.c
30122index 6e2c3b0..cb61871 100644
30123--- a/drivers/base/class.c
30124+++ b/drivers/base/class.c
30125@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30126 kfree(cp);
30127 }
30128
30129-static struct sysfs_ops class_sysfs_ops = {
30130+static const struct sysfs_ops class_sysfs_ops = {
30131 .show = class_attr_show,
30132 .store = class_attr_store,
30133 };
30134diff --git a/drivers/base/core.c b/drivers/base/core.c
30135index f33d768..a9358d0 100644
30136--- a/drivers/base/core.c
30137+++ b/drivers/base/core.c
30138@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30139 return ret;
30140 }
30141
30142-static struct sysfs_ops dev_sysfs_ops = {
30143+static const struct sysfs_ops dev_sysfs_ops = {
30144 .show = dev_attr_show,
30145 .store = dev_attr_store,
30146 };
30147@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30148 return retval;
30149 }
30150
30151-static struct kset_uevent_ops device_uevent_ops = {
30152+static const struct kset_uevent_ops device_uevent_ops = {
30153 .filter = dev_uevent_filter,
30154 .name = dev_uevent_name,
30155 .uevent = dev_uevent,
30156diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30157index 989429c..2272b00 100644
30158--- a/drivers/base/memory.c
30159+++ b/drivers/base/memory.c
30160@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30161 return retval;
30162 }
30163
30164-static struct kset_uevent_ops memory_uevent_ops = {
30165+static const struct kset_uevent_ops memory_uevent_ops = {
30166 .name = memory_uevent_name,
30167 .uevent = memory_uevent,
30168 };
30169diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30170index 3f202f7..61c4a6f 100644
30171--- a/drivers/base/sys.c
30172+++ b/drivers/base/sys.c
30173@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30174 return -EIO;
30175 }
30176
30177-static struct sysfs_ops sysfs_ops = {
30178+static const struct sysfs_ops sysfs_ops = {
30179 .show = sysdev_show,
30180 .store = sysdev_store,
30181 };
30182@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30183 return -EIO;
30184 }
30185
30186-static struct sysfs_ops sysfs_class_ops = {
30187+static const struct sysfs_ops sysfs_class_ops = {
30188 .show = sysdev_class_show,
30189 .store = sysdev_class_store,
30190 };
30191diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30192index eb4fa19..1954777 100644
30193--- a/drivers/block/DAC960.c
30194+++ b/drivers/block/DAC960.c
30195@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30196 unsigned long flags;
30197 int Channel, TargetID;
30198
30199+ pax_track_stack();
30200+
30201 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30202 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30203 sizeof(DAC960_SCSI_Inquiry_T) +
30204diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30205index ca9c548..ca6899c 100644
30206--- a/drivers/block/cciss.c
30207+++ b/drivers/block/cciss.c
30208@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30209 int err;
30210 u32 cp;
30211
30212+ memset(&arg64, 0, sizeof(arg64));
30213+
30214 err = 0;
30215 err |=
30216 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30217@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30218 /* Wait (up to 20 seconds) for a command to complete */
30219
30220 for (i = 20 * HZ; i > 0; i--) {
30221- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30222+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30223 if (done == FIFO_EMPTY)
30224 schedule_timeout_uninterruptible(1);
30225 else
30226@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30227 resend_cmd1:
30228
30229 /* Disable interrupt on the board. */
30230- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30231+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30232
30233 /* Make sure there is room in the command FIFO */
30234 /* Actually it should be completely empty at this time */
30235@@ -2884,13 +2886,13 @@ resend_cmd1:
30236 /* tape side of the driver. */
30237 for (i = 200000; i > 0; i--) {
30238 /* if fifo isn't full go */
30239- if (!(h->access.fifo_full(h)))
30240+ if (!(h->access->fifo_full(h)))
30241 break;
30242 udelay(10);
30243 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30244 " waiting!\n", h->ctlr);
30245 }
30246- h->access.submit_command(h, c); /* Send the cmd */
30247+ h->access->submit_command(h, c); /* Send the cmd */
30248 do {
30249 complete = pollcomplete(h->ctlr);
30250
30251@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30252 while (!hlist_empty(&h->reqQ)) {
30253 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30254 /* can't do anything if fifo is full */
30255- if ((h->access.fifo_full(h))) {
30256+ if ((h->access->fifo_full(h))) {
30257 printk(KERN_WARNING "cciss: fifo full\n");
30258 break;
30259 }
30260@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30261 h->Qdepth--;
30262
30263 /* Tell the controller execute command */
30264- h->access.submit_command(h, c);
30265+ h->access->submit_command(h, c);
30266
30267 /* Put job onto the completed Q */
30268 addQ(&h->cmpQ, c);
30269@@ -3393,17 +3395,17 @@ startio:
30270
30271 static inline unsigned long get_next_completion(ctlr_info_t *h)
30272 {
30273- return h->access.command_completed(h);
30274+ return h->access->command_completed(h);
30275 }
30276
30277 static inline int interrupt_pending(ctlr_info_t *h)
30278 {
30279- return h->access.intr_pending(h);
30280+ return h->access->intr_pending(h);
30281 }
30282
30283 static inline long interrupt_not_for_us(ctlr_info_t *h)
30284 {
30285- return (((h->access.intr_pending(h) == 0) ||
30286+ return (((h->access->intr_pending(h) == 0) ||
30287 (h->interrupts_enabled == 0)));
30288 }
30289
30290@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30291 */
30292 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30293 c->product_name = products[prod_index].product_name;
30294- c->access = *(products[prod_index].access);
30295+ c->access = products[prod_index].access;
30296 c->nr_cmds = c->max_commands - 4;
30297 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30298 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30299@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30300 }
30301
30302 /* make sure the board interrupts are off */
30303- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30304+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30305 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30306 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30307 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30308@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30309 cciss_scsi_setup(i);
30310
30311 /* Turn the interrupts on so we can service requests */
30312- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30313+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30314
30315 /* Get the firmware version */
30316 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30317diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30318index 04d6bf8..36e712d 100644
30319--- a/drivers/block/cciss.h
30320+++ b/drivers/block/cciss.h
30321@@ -90,7 +90,7 @@ struct ctlr_info
30322 // information about each logical volume
30323 drive_info_struct *drv[CISS_MAX_LUN];
30324
30325- struct access_method access;
30326+ struct access_method *access;
30327
30328 /* queue and queue Info */
30329 struct hlist_head reqQ;
30330diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30331index 6422651..bb1bdef 100644
30332--- a/drivers/block/cpqarray.c
30333+++ b/drivers/block/cpqarray.c
30334@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30335 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30336 goto Enomem4;
30337 }
30338- hba[i]->access.set_intr_mask(hba[i], 0);
30339+ hba[i]->access->set_intr_mask(hba[i], 0);
30340 if (request_irq(hba[i]->intr, do_ida_intr,
30341 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30342 {
30343@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30344 add_timer(&hba[i]->timer);
30345
30346 /* Enable IRQ now that spinlock and rate limit timer are set up */
30347- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30348+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30349
30350 for(j=0; j<NWD; j++) {
30351 struct gendisk *disk = ida_gendisk[i][j];
30352@@ -695,7 +695,7 @@ DBGINFO(
30353 for(i=0; i<NR_PRODUCTS; i++) {
30354 if (board_id == products[i].board_id) {
30355 c->product_name = products[i].product_name;
30356- c->access = *(products[i].access);
30357+ c->access = products[i].access;
30358 break;
30359 }
30360 }
30361@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30362 hba[ctlr]->intr = intr;
30363 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30364 hba[ctlr]->product_name = products[j].product_name;
30365- hba[ctlr]->access = *(products[j].access);
30366+ hba[ctlr]->access = products[j].access;
30367 hba[ctlr]->ctlr = ctlr;
30368 hba[ctlr]->board_id = board_id;
30369 hba[ctlr]->pci_dev = NULL; /* not PCI */
30370@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30371 struct scatterlist tmp_sg[SG_MAX];
30372 int i, dir, seg;
30373
30374+ pax_track_stack();
30375+
30376 if (blk_queue_plugged(q))
30377 goto startio;
30378
30379@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30380
30381 while((c = h->reqQ) != NULL) {
30382 /* Can't do anything if we're busy */
30383- if (h->access.fifo_full(h) == 0)
30384+ if (h->access->fifo_full(h) == 0)
30385 return;
30386
30387 /* Get the first entry from the request Q */
30388@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30389 h->Qdepth--;
30390
30391 /* Tell the controller to do our bidding */
30392- h->access.submit_command(h, c);
30393+ h->access->submit_command(h, c);
30394
30395 /* Get onto the completion Q */
30396 addQ(&h->cmpQ, c);
30397@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30398 unsigned long flags;
30399 __u32 a,a1;
30400
30401- istat = h->access.intr_pending(h);
30402+ istat = h->access->intr_pending(h);
30403 /* Is this interrupt for us? */
30404 if (istat == 0)
30405 return IRQ_NONE;
30406@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30407 */
30408 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30409 if (istat & FIFO_NOT_EMPTY) {
30410- while((a = h->access.command_completed(h))) {
30411+ while((a = h->access->command_completed(h))) {
30412 a1 = a; a &= ~3;
30413 if ((c = h->cmpQ) == NULL)
30414 {
30415@@ -1434,11 +1436,11 @@ static int sendcmd(
30416 /*
30417 * Disable interrupt
30418 */
30419- info_p->access.set_intr_mask(info_p, 0);
30420+ info_p->access->set_intr_mask(info_p, 0);
30421 /* Make sure there is room in the command FIFO */
30422 /* Actually it should be completely empty at this time. */
30423 for (i = 200000; i > 0; i--) {
30424- temp = info_p->access.fifo_full(info_p);
30425+ temp = info_p->access->fifo_full(info_p);
30426 if (temp != 0) {
30427 break;
30428 }
30429@@ -1451,7 +1453,7 @@ DBG(
30430 /*
30431 * Send the cmd
30432 */
30433- info_p->access.submit_command(info_p, c);
30434+ info_p->access->submit_command(info_p, c);
30435 complete = pollcomplete(ctlr);
30436
30437 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30438@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30439 * we check the new geometry. Then turn interrupts back on when
30440 * we're done.
30441 */
30442- host->access.set_intr_mask(host, 0);
30443+ host->access->set_intr_mask(host, 0);
30444 getgeometry(ctlr);
30445- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30446+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30447
30448 for(i=0; i<NWD; i++) {
30449 struct gendisk *disk = ida_gendisk[ctlr][i];
30450@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30451 /* Wait (up to 2 seconds) for a command to complete */
30452
30453 for (i = 200000; i > 0; i--) {
30454- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30455+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30456 if (done == 0) {
30457 udelay(10); /* a short fixed delay */
30458 } else
30459diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30460index be73e9d..7fbf140 100644
30461--- a/drivers/block/cpqarray.h
30462+++ b/drivers/block/cpqarray.h
30463@@ -99,7 +99,7 @@ struct ctlr_info {
30464 drv_info_t drv[NWD];
30465 struct proc_dir_entry *proc;
30466
30467- struct access_method access;
30468+ struct access_method *access;
30469
30470 cmdlist_t *reqQ;
30471 cmdlist_t *cmpQ;
30472diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30473index 8ec2d70..2804b30 100644
30474--- a/drivers/block/loop.c
30475+++ b/drivers/block/loop.c
30476@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30477 mm_segment_t old_fs = get_fs();
30478
30479 set_fs(get_ds());
30480- bw = file->f_op->write(file, buf, len, &pos);
30481+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30482 set_fs(old_fs);
30483 if (likely(bw == len))
30484 return 0;
30485diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30486index 26ada47..083c480 100644
30487--- a/drivers/block/nbd.c
30488+++ b/drivers/block/nbd.c
30489@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30490 struct kvec iov;
30491 sigset_t blocked, oldset;
30492
30493+ pax_track_stack();
30494+
30495 if (unlikely(!sock)) {
30496 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30497 lo->disk->disk_name, (send ? "send" : "recv"));
30498@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30499 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30500 unsigned int cmd, unsigned long arg)
30501 {
30502+ pax_track_stack();
30503+
30504 switch (cmd) {
30505 case NBD_DISCONNECT: {
30506 struct request sreq;
30507diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30508index a5d585d..d087be3 100644
30509--- a/drivers/block/pktcdvd.c
30510+++ b/drivers/block/pktcdvd.c
30511@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30512 return len;
30513 }
30514
30515-static struct sysfs_ops kobj_pkt_ops = {
30516+static const struct sysfs_ops kobj_pkt_ops = {
30517 .show = kobj_pkt_show,
30518 .store = kobj_pkt_store
30519 };
30520diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30521index 6aad99e..89cd142 100644
30522--- a/drivers/char/Kconfig
30523+++ b/drivers/char/Kconfig
30524@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30525
30526 config DEVKMEM
30527 bool "/dev/kmem virtual device support"
30528- default y
30529+ default n
30530+ depends on !GRKERNSEC_KMEM
30531 help
30532 Say Y here if you want to support the /dev/kmem device. The
30533 /dev/kmem device is rarely used, but can be used for certain
30534@@ -1114,6 +1115,7 @@ config DEVPORT
30535 bool
30536 depends on !M68K
30537 depends on ISA || PCI
30538+ depends on !GRKERNSEC_KMEM
30539 default y
30540
30541 source "drivers/s390/char/Kconfig"
30542diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30543index a96f319..a778a5b 100644
30544--- a/drivers/char/agp/frontend.c
30545+++ b/drivers/char/agp/frontend.c
30546@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30547 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30548 return -EFAULT;
30549
30550- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30551+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30552 return -EFAULT;
30553
30554 client = agp_find_client_by_pid(reserve.pid);
30555diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30556index d8cff90..9628e70 100644
30557--- a/drivers/char/briq_panel.c
30558+++ b/drivers/char/briq_panel.c
30559@@ -10,6 +10,7 @@
30560 #include <linux/types.h>
30561 #include <linux/errno.h>
30562 #include <linux/tty.h>
30563+#include <linux/mutex.h>
30564 #include <linux/timer.h>
30565 #include <linux/kernel.h>
30566 #include <linux/wait.h>
30567@@ -36,6 +37,7 @@ static int vfd_is_open;
30568 static unsigned char vfd[40];
30569 static int vfd_cursor;
30570 static unsigned char ledpb, led;
30571+static DEFINE_MUTEX(vfd_mutex);
30572
30573 static void update_vfd(void)
30574 {
30575@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30576 if (!vfd_is_open)
30577 return -EBUSY;
30578
30579+ mutex_lock(&vfd_mutex);
30580 for (;;) {
30581 char c;
30582 if (!indx)
30583 break;
30584- if (get_user(c, buf))
30585+ if (get_user(c, buf)) {
30586+ mutex_unlock(&vfd_mutex);
30587 return -EFAULT;
30588+ }
30589 if (esc) {
30590 set_led(c);
30591 esc = 0;
30592@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30593 buf++;
30594 }
30595 update_vfd();
30596+ mutex_unlock(&vfd_mutex);
30597
30598 return len;
30599 }
30600diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30601index 31e7c91..161afc0 100644
30602--- a/drivers/char/genrtc.c
30603+++ b/drivers/char/genrtc.c
30604@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30605 switch (cmd) {
30606
30607 case RTC_PLL_GET:
30608+ memset(&pll, 0, sizeof(pll));
30609 if (get_rtc_pll(&pll))
30610 return -EINVAL;
30611 else
30612diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30613index 006466d..a2bb21c 100644
30614--- a/drivers/char/hpet.c
30615+++ b/drivers/char/hpet.c
30616@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30617 return 0;
30618 }
30619
30620-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30621+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30622
30623 static int
30624 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30625@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30626 }
30627
30628 static int
30629-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30630+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30631 {
30632 struct hpet_timer __iomem *timer;
30633 struct hpet __iomem *hpet;
30634@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30635 {
30636 struct hpet_info info;
30637
30638+ memset(&info, 0, sizeof(info));
30639+
30640 if (devp->hd_ireqfreq)
30641 info.hi_ireqfreq =
30642 hpet_time_div(hpetp, devp->hd_ireqfreq);
30643- else
30644- info.hi_ireqfreq = 0;
30645 info.hi_flags =
30646 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30647 info.hi_hpet = hpetp->hp_which;
30648diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30649index 0afc8b8..6913fc3 100644
30650--- a/drivers/char/hvc_beat.c
30651+++ b/drivers/char/hvc_beat.c
30652@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30653 return cnt;
30654 }
30655
30656-static struct hv_ops hvc_beat_get_put_ops = {
30657+static const struct hv_ops hvc_beat_get_put_ops = {
30658 .get_chars = hvc_beat_get_chars,
30659 .put_chars = hvc_beat_put_chars,
30660 };
30661diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30662index 98097f2..407dddc 100644
30663--- a/drivers/char/hvc_console.c
30664+++ b/drivers/char/hvc_console.c
30665@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30666 * console interfaces but can still be used as a tty device. This has to be
30667 * static because kmalloc will not work during early console init.
30668 */
30669-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30670+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30671 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30672 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30673
30674@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30675 * vty adapters do NOT get an hvc_instantiate() callback since they
30676 * appear after early console init.
30677 */
30678-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30679+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
30680 {
30681 struct hvc_struct *hp;
30682
30683@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
30684 };
30685
30686 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
30687- struct hv_ops *ops, int outbuf_size)
30688+ const struct hv_ops *ops, int outbuf_size)
30689 {
30690 struct hvc_struct *hp;
30691 int i;
30692diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
30693index 10950ca..ed176c3 100644
30694--- a/drivers/char/hvc_console.h
30695+++ b/drivers/char/hvc_console.h
30696@@ -55,7 +55,7 @@ struct hvc_struct {
30697 int outbuf_size;
30698 int n_outbuf;
30699 uint32_t vtermno;
30700- struct hv_ops *ops;
30701+ const struct hv_ops *ops;
30702 int irq_requested;
30703 int data;
30704 struct winsize ws;
30705@@ -76,11 +76,11 @@ struct hv_ops {
30706 };
30707
30708 /* Register a vterm and a slot index for use as a console (console_init) */
30709-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
30710+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
30711
30712 /* register a vterm for hvc tty operation (module_init or hotplug add) */
30713 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
30714- struct hv_ops *ops, int outbuf_size);
30715+ const struct hv_ops *ops, int outbuf_size);
30716 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
30717 extern int hvc_remove(struct hvc_struct *hp);
30718
30719diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
30720index 936d05b..fd02426 100644
30721--- a/drivers/char/hvc_iseries.c
30722+++ b/drivers/char/hvc_iseries.c
30723@@ -197,7 +197,7 @@ done:
30724 return sent;
30725 }
30726
30727-static struct hv_ops hvc_get_put_ops = {
30728+static const struct hv_ops hvc_get_put_ops = {
30729 .get_chars = get_chars,
30730 .put_chars = put_chars,
30731 .notifier_add = notifier_add_irq,
30732diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
30733index b0e168f..69cda2a 100644
30734--- a/drivers/char/hvc_iucv.c
30735+++ b/drivers/char/hvc_iucv.c
30736@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
30737
30738
30739 /* HVC operations */
30740-static struct hv_ops hvc_iucv_ops = {
30741+static const struct hv_ops hvc_iucv_ops = {
30742 .get_chars = hvc_iucv_get_chars,
30743 .put_chars = hvc_iucv_put_chars,
30744 .notifier_add = hvc_iucv_notifier_add,
30745diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
30746index 88590d0..61c4a61 100644
30747--- a/drivers/char/hvc_rtas.c
30748+++ b/drivers/char/hvc_rtas.c
30749@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
30750 return i;
30751 }
30752
30753-static struct hv_ops hvc_rtas_get_put_ops = {
30754+static const struct hv_ops hvc_rtas_get_put_ops = {
30755 .get_chars = hvc_rtas_read_console,
30756 .put_chars = hvc_rtas_write_console,
30757 };
30758diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
30759index bd63ba8..b0957e6 100644
30760--- a/drivers/char/hvc_udbg.c
30761+++ b/drivers/char/hvc_udbg.c
30762@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
30763 return i;
30764 }
30765
30766-static struct hv_ops hvc_udbg_ops = {
30767+static const struct hv_ops hvc_udbg_ops = {
30768 .get_chars = hvc_udbg_get,
30769 .put_chars = hvc_udbg_put,
30770 };
30771diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
30772index 10be343..27370e9 100644
30773--- a/drivers/char/hvc_vio.c
30774+++ b/drivers/char/hvc_vio.c
30775@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
30776 return got;
30777 }
30778
30779-static struct hv_ops hvc_get_put_ops = {
30780+static const struct hv_ops hvc_get_put_ops = {
30781 .get_chars = filtered_get_chars,
30782 .put_chars = hvc_put_chars,
30783 .notifier_add = notifier_add_irq,
30784diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
30785index a6ee32b..94f8c26 100644
30786--- a/drivers/char/hvc_xen.c
30787+++ b/drivers/char/hvc_xen.c
30788@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
30789 return recv;
30790 }
30791
30792-static struct hv_ops hvc_ops = {
30793+static const struct hv_ops hvc_ops = {
30794 .get_chars = read_console,
30795 .put_chars = write_console,
30796 .notifier_add = notifier_add_irq,
30797diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
30798index 266b858..f3ee0bb 100644
30799--- a/drivers/char/hvcs.c
30800+++ b/drivers/char/hvcs.c
30801@@ -82,6 +82,7 @@
30802 #include <asm/hvcserver.h>
30803 #include <asm/uaccess.h>
30804 #include <asm/vio.h>
30805+#include <asm/local.h>
30806
30807 /*
30808 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
30809@@ -269,7 +270,7 @@ struct hvcs_struct {
30810 unsigned int index;
30811
30812 struct tty_struct *tty;
30813- int open_count;
30814+ local_t open_count;
30815
30816 /*
30817 * Used to tell the driver kernel_thread what operations need to take
30818@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
30819
30820 spin_lock_irqsave(&hvcsd->lock, flags);
30821
30822- if (hvcsd->open_count > 0) {
30823+ if (local_read(&hvcsd->open_count) > 0) {
30824 spin_unlock_irqrestore(&hvcsd->lock, flags);
30825 printk(KERN_INFO "HVCS: vterm state unchanged. "
30826 "The hvcs device node is still in use.\n");
30827@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
30828 if ((retval = hvcs_partner_connect(hvcsd)))
30829 goto error_release;
30830
30831- hvcsd->open_count = 1;
30832+ local_set(&hvcsd->open_count, 1);
30833 hvcsd->tty = tty;
30834 tty->driver_data = hvcsd;
30835
30836@@ -1169,7 +1170,7 @@ fast_open:
30837
30838 spin_lock_irqsave(&hvcsd->lock, flags);
30839 kref_get(&hvcsd->kref);
30840- hvcsd->open_count++;
30841+ local_inc(&hvcsd->open_count);
30842 hvcsd->todo_mask |= HVCS_SCHED_READ;
30843 spin_unlock_irqrestore(&hvcsd->lock, flags);
30844
30845@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30846 hvcsd = tty->driver_data;
30847
30848 spin_lock_irqsave(&hvcsd->lock, flags);
30849- if (--hvcsd->open_count == 0) {
30850+ if (local_dec_and_test(&hvcsd->open_count)) {
30851
30852 vio_disable_interrupts(hvcsd->vdev);
30853
30854@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30855 free_irq(irq, hvcsd);
30856 kref_put(&hvcsd->kref, destroy_hvcs_struct);
30857 return;
30858- } else if (hvcsd->open_count < 0) {
30859+ } else if (local_read(&hvcsd->open_count) < 0) {
30860 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
30861 " is missmanaged.\n",
30862- hvcsd->vdev->unit_address, hvcsd->open_count);
30863+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
30864 }
30865
30866 spin_unlock_irqrestore(&hvcsd->lock, flags);
30867@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30868
30869 spin_lock_irqsave(&hvcsd->lock, flags);
30870 /* Preserve this so that we know how many kref refs to put */
30871- temp_open_count = hvcsd->open_count;
30872+ temp_open_count = local_read(&hvcsd->open_count);
30873
30874 /*
30875 * Don't kref put inside the spinlock because the destruction
30876@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30877 hvcsd->tty->driver_data = NULL;
30878 hvcsd->tty = NULL;
30879
30880- hvcsd->open_count = 0;
30881+ local_set(&hvcsd->open_count, 0);
30882
30883 /* This will drop any buffered data on the floor which is OK in a hangup
30884 * scenario. */
30885@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
30886 * the middle of a write operation? This is a crummy place to do this
30887 * but we want to keep it all in the spinlock.
30888 */
30889- if (hvcsd->open_count <= 0) {
30890+ if (local_read(&hvcsd->open_count) <= 0) {
30891 spin_unlock_irqrestore(&hvcsd->lock, flags);
30892 return -ENODEV;
30893 }
30894@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
30895 {
30896 struct hvcs_struct *hvcsd = tty->driver_data;
30897
30898- if (!hvcsd || hvcsd->open_count <= 0)
30899+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
30900 return 0;
30901
30902 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
30903diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
30904index ec5e3f8..02455ba 100644
30905--- a/drivers/char/ipmi/ipmi_msghandler.c
30906+++ b/drivers/char/ipmi/ipmi_msghandler.c
30907@@ -414,7 +414,7 @@ struct ipmi_smi {
30908 struct proc_dir_entry *proc_dir;
30909 char proc_dir_name[10];
30910
30911- atomic_t stats[IPMI_NUM_STATS];
30912+ atomic_unchecked_t stats[IPMI_NUM_STATS];
30913
30914 /*
30915 * run_to_completion duplicate of smb_info, smi_info
30916@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
30917
30918
30919 #define ipmi_inc_stat(intf, stat) \
30920- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
30921+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
30922 #define ipmi_get_stat(intf, stat) \
30923- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
30924+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
30925
30926 static int is_lan_addr(struct ipmi_addr *addr)
30927 {
30928@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
30929 INIT_LIST_HEAD(&intf->cmd_rcvrs);
30930 init_waitqueue_head(&intf->waitq);
30931 for (i = 0; i < IPMI_NUM_STATS; i++)
30932- atomic_set(&intf->stats[i], 0);
30933+ atomic_set_unchecked(&intf->stats[i], 0);
30934
30935 intf->proc_dir = NULL;
30936
30937@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
30938 struct ipmi_smi_msg smi_msg;
30939 struct ipmi_recv_msg recv_msg;
30940
30941+ pax_track_stack();
30942+
30943 si = (struct ipmi_system_interface_addr *) &addr;
30944 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
30945 si->channel = IPMI_BMC_CHANNEL;
30946diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
30947index abae8c9..8021979 100644
30948--- a/drivers/char/ipmi/ipmi_si_intf.c
30949+++ b/drivers/char/ipmi/ipmi_si_intf.c
30950@@ -277,7 +277,7 @@ struct smi_info {
30951 unsigned char slave_addr;
30952
30953 /* Counters and things for the proc filesystem. */
30954- atomic_t stats[SI_NUM_STATS];
30955+ atomic_unchecked_t stats[SI_NUM_STATS];
30956
30957 struct task_struct *thread;
30958
30959@@ -285,9 +285,9 @@ struct smi_info {
30960 };
30961
30962 #define smi_inc_stat(smi, stat) \
30963- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
30964+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
30965 #define smi_get_stat(smi, stat) \
30966- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
30967+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
30968
30969 #define SI_MAX_PARMS 4
30970
30971@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
30972 atomic_set(&new_smi->req_events, 0);
30973 new_smi->run_to_completion = 0;
30974 for (i = 0; i < SI_NUM_STATS; i++)
30975- atomic_set(&new_smi->stats[i], 0);
30976+ atomic_set_unchecked(&new_smi->stats[i], 0);
30977
30978 new_smi->interrupt_disabled = 0;
30979 atomic_set(&new_smi->stop_operation, 0);
30980diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
30981index 402838f..55e2200 100644
30982--- a/drivers/char/istallion.c
30983+++ b/drivers/char/istallion.c
30984@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
30985 * re-used for each stats call.
30986 */
30987 static comstats_t stli_comstats;
30988-static combrd_t stli_brdstats;
30989 static struct asystats stli_cdkstats;
30990
30991 /*****************************************************************************/
30992@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
30993 {
30994 struct stlibrd *brdp;
30995 unsigned int i;
30996+ combrd_t stli_brdstats;
30997
30998 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
30999 return -EFAULT;
31000@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31001 struct stliport stli_dummyport;
31002 struct stliport *portp;
31003
31004+ pax_track_stack();
31005+
31006 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31007 return -EFAULT;
31008 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31009@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31010 struct stlibrd stli_dummybrd;
31011 struct stlibrd *brdp;
31012
31013+ pax_track_stack();
31014+
31015 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31016 return -EFAULT;
31017 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31018diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31019index 950837c..e55a288 100644
31020--- a/drivers/char/keyboard.c
31021+++ b/drivers/char/keyboard.c
31022@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31023 kbd->kbdmode == VC_MEDIUMRAW) &&
31024 value != KVAL(K_SAK))
31025 return; /* SAK is allowed even in raw mode */
31026+
31027+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31028+ {
31029+ void *func = fn_handler[value];
31030+ if (func == fn_show_state || func == fn_show_ptregs ||
31031+ func == fn_show_mem)
31032+ return;
31033+ }
31034+#endif
31035+
31036 fn_handler[value](vc);
31037 }
31038
31039@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31040 .evbit = { BIT_MASK(EV_SND) },
31041 },
31042
31043- { }, /* Terminating entry */
31044+ { 0 }, /* Terminating entry */
31045 };
31046
31047 MODULE_DEVICE_TABLE(input, kbd_ids);
31048diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31049index 87c67b4..230527a 100644
31050--- a/drivers/char/mbcs.c
31051+++ b/drivers/char/mbcs.c
31052@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31053 return 0;
31054 }
31055
31056-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31057+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31058 {
31059 .part_num = MBCS_PART_NUM,
31060 .mfg_num = MBCS_MFG_NUM,
31061diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31062index 1270f64..8495f49 100644
31063--- a/drivers/char/mem.c
31064+++ b/drivers/char/mem.c
31065@@ -18,6 +18,7 @@
31066 #include <linux/raw.h>
31067 #include <linux/tty.h>
31068 #include <linux/capability.h>
31069+#include <linux/security.h>
31070 #include <linux/ptrace.h>
31071 #include <linux/device.h>
31072 #include <linux/highmem.h>
31073@@ -35,6 +36,10 @@
31074 # include <linux/efi.h>
31075 #endif
31076
31077+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31078+extern struct file_operations grsec_fops;
31079+#endif
31080+
31081 static inline unsigned long size_inside_page(unsigned long start,
31082 unsigned long size)
31083 {
31084@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31085
31086 while (cursor < to) {
31087 if (!devmem_is_allowed(pfn)) {
31088+#ifdef CONFIG_GRKERNSEC_KMEM
31089+ gr_handle_mem_readwrite(from, to);
31090+#else
31091 printk(KERN_INFO
31092 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31093 current->comm, from, to);
31094+#endif
31095 return 0;
31096 }
31097 cursor += PAGE_SIZE;
31098@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31099 }
31100 return 1;
31101 }
31102+#elif defined(CONFIG_GRKERNSEC_KMEM)
31103+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31104+{
31105+ return 0;
31106+}
31107 #else
31108 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31109 {
31110@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31111 #endif
31112
31113 while (count > 0) {
31114+ char *temp;
31115+
31116 /*
31117 * Handle first page in case it's not aligned
31118 */
31119@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31120 if (!ptr)
31121 return -EFAULT;
31122
31123- if (copy_to_user(buf, ptr, sz)) {
31124+#ifdef CONFIG_PAX_USERCOPY
31125+ temp = kmalloc(sz, GFP_KERNEL);
31126+ if (!temp) {
31127+ unxlate_dev_mem_ptr(p, ptr);
31128+ return -ENOMEM;
31129+ }
31130+ memcpy(temp, ptr, sz);
31131+#else
31132+ temp = ptr;
31133+#endif
31134+
31135+ if (copy_to_user(buf, temp, sz)) {
31136+
31137+#ifdef CONFIG_PAX_USERCOPY
31138+ kfree(temp);
31139+#endif
31140+
31141 unxlate_dev_mem_ptr(p, ptr);
31142 return -EFAULT;
31143 }
31144
31145+#ifdef CONFIG_PAX_USERCOPY
31146+ kfree(temp);
31147+#endif
31148+
31149 unxlate_dev_mem_ptr(p, ptr);
31150
31151 buf += sz;
31152@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31153 size_t count, loff_t *ppos)
31154 {
31155 unsigned long p = *ppos;
31156- ssize_t low_count, read, sz;
31157+ ssize_t low_count, read, sz, err = 0;
31158 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31159- int err = 0;
31160
31161 read = 0;
31162 if (p < (unsigned long) high_memory) {
31163@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31164 }
31165 #endif
31166 while (low_count > 0) {
31167+ char *temp;
31168+
31169 sz = size_inside_page(p, low_count);
31170
31171 /*
31172@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31173 */
31174 kbuf = xlate_dev_kmem_ptr((char *)p);
31175
31176- if (copy_to_user(buf, kbuf, sz))
31177+#ifdef CONFIG_PAX_USERCOPY
31178+ temp = kmalloc(sz, GFP_KERNEL);
31179+ if (!temp)
31180+ return -ENOMEM;
31181+ memcpy(temp, kbuf, sz);
31182+#else
31183+ temp = kbuf;
31184+#endif
31185+
31186+ err = copy_to_user(buf, temp, sz);
31187+
31188+#ifdef CONFIG_PAX_USERCOPY
31189+ kfree(temp);
31190+#endif
31191+
31192+ if (err)
31193 return -EFAULT;
31194 buf += sz;
31195 p += sz;
31196@@ -889,6 +941,9 @@ static const struct memdev {
31197 #ifdef CONFIG_CRASH_DUMP
31198 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31199 #endif
31200+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31201+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31202+#endif
31203 };
31204
31205 static int memory_open(struct inode *inode, struct file *filp)
31206diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31207index 674b3ab..a8d1970 100644
31208--- a/drivers/char/pcmcia/ipwireless/tty.c
31209+++ b/drivers/char/pcmcia/ipwireless/tty.c
31210@@ -29,6 +29,7 @@
31211 #include <linux/tty_driver.h>
31212 #include <linux/tty_flip.h>
31213 #include <linux/uaccess.h>
31214+#include <asm/local.h>
31215
31216 #include "tty.h"
31217 #include "network.h"
31218@@ -51,7 +52,7 @@ struct ipw_tty {
31219 int tty_type;
31220 struct ipw_network *network;
31221 struct tty_struct *linux_tty;
31222- int open_count;
31223+ local_t open_count;
31224 unsigned int control_lines;
31225 struct mutex ipw_tty_mutex;
31226 int tx_bytes_queued;
31227@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31228 mutex_unlock(&tty->ipw_tty_mutex);
31229 return -ENODEV;
31230 }
31231- if (tty->open_count == 0)
31232+ if (local_read(&tty->open_count) == 0)
31233 tty->tx_bytes_queued = 0;
31234
31235- tty->open_count++;
31236+ local_inc(&tty->open_count);
31237
31238 tty->linux_tty = linux_tty;
31239 linux_tty->driver_data = tty;
31240@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31241
31242 static void do_ipw_close(struct ipw_tty *tty)
31243 {
31244- tty->open_count--;
31245-
31246- if (tty->open_count == 0) {
31247+ if (local_dec_return(&tty->open_count) == 0) {
31248 struct tty_struct *linux_tty = tty->linux_tty;
31249
31250 if (linux_tty != NULL) {
31251@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31252 return;
31253
31254 mutex_lock(&tty->ipw_tty_mutex);
31255- if (tty->open_count == 0) {
31256+ if (local_read(&tty->open_count) == 0) {
31257 mutex_unlock(&tty->ipw_tty_mutex);
31258 return;
31259 }
31260@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31261 return;
31262 }
31263
31264- if (!tty->open_count) {
31265+ if (!local_read(&tty->open_count)) {
31266 mutex_unlock(&tty->ipw_tty_mutex);
31267 return;
31268 }
31269@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31270 return -ENODEV;
31271
31272 mutex_lock(&tty->ipw_tty_mutex);
31273- if (!tty->open_count) {
31274+ if (!local_read(&tty->open_count)) {
31275 mutex_unlock(&tty->ipw_tty_mutex);
31276 return -EINVAL;
31277 }
31278@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31279 if (!tty)
31280 return -ENODEV;
31281
31282- if (!tty->open_count)
31283+ if (!local_read(&tty->open_count))
31284 return -EINVAL;
31285
31286 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31287@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31288 if (!tty)
31289 return 0;
31290
31291- if (!tty->open_count)
31292+ if (!local_read(&tty->open_count))
31293 return 0;
31294
31295 return tty->tx_bytes_queued;
31296@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31297 if (!tty)
31298 return -ENODEV;
31299
31300- if (!tty->open_count)
31301+ if (!local_read(&tty->open_count))
31302 return -EINVAL;
31303
31304 return get_control_lines(tty);
31305@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31306 if (!tty)
31307 return -ENODEV;
31308
31309- if (!tty->open_count)
31310+ if (!local_read(&tty->open_count))
31311 return -EINVAL;
31312
31313 return set_control_lines(tty, set, clear);
31314@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31315 if (!tty)
31316 return -ENODEV;
31317
31318- if (!tty->open_count)
31319+ if (!local_read(&tty->open_count))
31320 return -EINVAL;
31321
31322 /* FIXME: Exactly how is the tty object locked here .. */
31323@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31324 against a parallel ioctl etc */
31325 mutex_lock(&ttyj->ipw_tty_mutex);
31326 }
31327- while (ttyj->open_count)
31328+ while (local_read(&ttyj->open_count))
31329 do_ipw_close(ttyj);
31330 ipwireless_disassociate_network_ttys(network,
31331 ttyj->channel_idx);
31332diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31333index 62f282e..e45c45c 100644
31334--- a/drivers/char/pty.c
31335+++ b/drivers/char/pty.c
31336@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31337 register_sysctl_table(pty_root_table);
31338
31339 /* Now create the /dev/ptmx special device */
31340+ pax_open_kernel();
31341 tty_default_fops(&ptmx_fops);
31342- ptmx_fops.open = ptmx_open;
31343+ *(void **)&ptmx_fops.open = ptmx_open;
31344+ pax_close_kernel();
31345
31346 cdev_init(&ptmx_cdev, &ptmx_fops);
31347 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31348diff --git a/drivers/char/random.c b/drivers/char/random.c
31349index 3a19e2d..6ed09d3 100644
31350--- a/drivers/char/random.c
31351+++ b/drivers/char/random.c
31352@@ -254,8 +254,13 @@
31353 /*
31354 * Configuration information
31355 */
31356+#ifdef CONFIG_GRKERNSEC_RANDNET
31357+#define INPUT_POOL_WORDS 512
31358+#define OUTPUT_POOL_WORDS 128
31359+#else
31360 #define INPUT_POOL_WORDS 128
31361 #define OUTPUT_POOL_WORDS 32
31362+#endif
31363 #define SEC_XFER_SIZE 512
31364
31365 /*
31366@@ -292,10 +297,17 @@ static struct poolinfo {
31367 int poolwords;
31368 int tap1, tap2, tap3, tap4, tap5;
31369 } poolinfo_table[] = {
31370+#ifdef CONFIG_GRKERNSEC_RANDNET
31371+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31372+ { 512, 411, 308, 208, 104, 1 },
31373+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31374+ { 128, 103, 76, 51, 25, 1 },
31375+#else
31376 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31377 { 128, 103, 76, 51, 25, 1 },
31378 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31379 { 32, 26, 20, 14, 7, 1 },
31380+#endif
31381 #if 0
31382 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31383 { 2048, 1638, 1231, 819, 411, 1 },
31384@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31385 #include <linux/sysctl.h>
31386
31387 static int min_read_thresh = 8, min_write_thresh;
31388-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31389+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31390 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31391 static char sysctl_bootid[16];
31392
31393diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31394index 0e29a23..0efc2c2 100644
31395--- a/drivers/char/rocket.c
31396+++ b/drivers/char/rocket.c
31397@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31398 struct rocket_ports tmp;
31399 int board;
31400
31401+ pax_track_stack();
31402+
31403 if (!retports)
31404 return -EFAULT;
31405 memset(&tmp, 0, sizeof (tmp));
31406diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31407index 8c262aa..4d3b058 100644
31408--- a/drivers/char/sonypi.c
31409+++ b/drivers/char/sonypi.c
31410@@ -55,6 +55,7 @@
31411 #include <asm/uaccess.h>
31412 #include <asm/io.h>
31413 #include <asm/system.h>
31414+#include <asm/local.h>
31415
31416 #include <linux/sonypi.h>
31417
31418@@ -491,7 +492,7 @@ static struct sonypi_device {
31419 spinlock_t fifo_lock;
31420 wait_queue_head_t fifo_proc_list;
31421 struct fasync_struct *fifo_async;
31422- int open_count;
31423+ local_t open_count;
31424 int model;
31425 struct input_dev *input_jog_dev;
31426 struct input_dev *input_key_dev;
31427@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31428 static int sonypi_misc_release(struct inode *inode, struct file *file)
31429 {
31430 mutex_lock(&sonypi_device.lock);
31431- sonypi_device.open_count--;
31432+ local_dec(&sonypi_device.open_count);
31433 mutex_unlock(&sonypi_device.lock);
31434 return 0;
31435 }
31436@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31437 lock_kernel();
31438 mutex_lock(&sonypi_device.lock);
31439 /* Flush input queue on first open */
31440- if (!sonypi_device.open_count)
31441+ if (!local_read(&sonypi_device.open_count))
31442 kfifo_reset(sonypi_device.fifo);
31443- sonypi_device.open_count++;
31444+ local_inc(&sonypi_device.open_count);
31445 mutex_unlock(&sonypi_device.lock);
31446 unlock_kernel();
31447 return 0;
31448diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31449index db6dcfa..13834cb 100644
31450--- a/drivers/char/stallion.c
31451+++ b/drivers/char/stallion.c
31452@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31453 struct stlport stl_dummyport;
31454 struct stlport *portp;
31455
31456+ pax_track_stack();
31457+
31458 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31459 return -EFAULT;
31460 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31461diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31462index a0789f6..cea3902 100644
31463--- a/drivers/char/tpm/tpm.c
31464+++ b/drivers/char/tpm/tpm.c
31465@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31466 chip->vendor.req_complete_val)
31467 goto out_recv;
31468
31469- if ((status == chip->vendor.req_canceled)) {
31470+ if (status == chip->vendor.req_canceled) {
31471 dev_err(chip->dev, "Operation Canceled\n");
31472 rc = -ECANCELED;
31473 goto out;
31474@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31475
31476 struct tpm_chip *chip = dev_get_drvdata(dev);
31477
31478+ pax_track_stack();
31479+
31480 tpm_cmd.header.in = tpm_readpubek_header;
31481 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31482 "attempting to read the PUBEK");
31483diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31484index bf2170f..ce8cab9 100644
31485--- a/drivers/char/tpm/tpm_bios.c
31486+++ b/drivers/char/tpm/tpm_bios.c
31487@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31488 event = addr;
31489
31490 if ((event->event_type == 0 && event->event_size == 0) ||
31491- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31492+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31493 return NULL;
31494
31495 return addr;
31496@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31497 return NULL;
31498
31499 if ((event->event_type == 0 && event->event_size == 0) ||
31500- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31501+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31502 return NULL;
31503
31504 (*pos)++;
31505@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31506 int i;
31507
31508 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31509- seq_putc(m, data[i]);
31510+ if (!seq_putc(m, data[i]))
31511+ return -EFAULT;
31512
31513 return 0;
31514 }
31515@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31516 log->bios_event_log_end = log->bios_event_log + len;
31517
31518 virt = acpi_os_map_memory(start, len);
31519+ if (!virt) {
31520+ kfree(log->bios_event_log);
31521+ log->bios_event_log = NULL;
31522+ return -EFAULT;
31523+ }
31524
31525- memcpy(log->bios_event_log, virt, len);
31526+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31527
31528 acpi_os_unmap_memory(virt, len);
31529 return 0;
31530diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31531index 123cedf..137edef 100644
31532--- a/drivers/char/tty_io.c
31533+++ b/drivers/char/tty_io.c
31534@@ -1774,6 +1774,7 @@ got_driver:
31535
31536 if (IS_ERR(tty)) {
31537 mutex_unlock(&tty_mutex);
31538+ tty_driver_kref_put(driver);
31539 return PTR_ERR(tty);
31540 }
31541 }
31542@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31543 return retval;
31544 }
31545
31546+EXPORT_SYMBOL(tty_ioctl);
31547+
31548 #ifdef CONFIG_COMPAT
31549-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31550+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31551 unsigned long arg)
31552 {
31553 struct inode *inode = file->f_dentry->d_inode;
31554@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31555
31556 return retval;
31557 }
31558+
31559+EXPORT_SYMBOL(tty_compat_ioctl);
31560 #endif
31561
31562 /*
31563@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31564
31565 void tty_default_fops(struct file_operations *fops)
31566 {
31567- *fops = tty_fops;
31568+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31569 }
31570
31571 /*
31572diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31573index d814a3d..b55b9c9 100644
31574--- a/drivers/char/tty_ldisc.c
31575+++ b/drivers/char/tty_ldisc.c
31576@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31577 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31578 struct tty_ldisc_ops *ldo = ld->ops;
31579
31580- ldo->refcount--;
31581+ atomic_dec(&ldo->refcount);
31582 module_put(ldo->owner);
31583 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31584
31585@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31586 spin_lock_irqsave(&tty_ldisc_lock, flags);
31587 tty_ldiscs[disc] = new_ldisc;
31588 new_ldisc->num = disc;
31589- new_ldisc->refcount = 0;
31590+ atomic_set(&new_ldisc->refcount, 0);
31591 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31592
31593 return ret;
31594@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31595 return -EINVAL;
31596
31597 spin_lock_irqsave(&tty_ldisc_lock, flags);
31598- if (tty_ldiscs[disc]->refcount)
31599+ if (atomic_read(&tty_ldiscs[disc]->refcount))
31600 ret = -EBUSY;
31601 else
31602 tty_ldiscs[disc] = NULL;
31603@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31604 if (ldops) {
31605 ret = ERR_PTR(-EAGAIN);
31606 if (try_module_get(ldops->owner)) {
31607- ldops->refcount++;
31608+ atomic_inc(&ldops->refcount);
31609 ret = ldops;
31610 }
31611 }
31612@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31613 unsigned long flags;
31614
31615 spin_lock_irqsave(&tty_ldisc_lock, flags);
31616- ldops->refcount--;
31617+ atomic_dec(&ldops->refcount);
31618 module_put(ldops->owner);
31619 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31620 }
31621diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31622index a035ae3..c27fe2c 100644
31623--- a/drivers/char/virtio_console.c
31624+++ b/drivers/char/virtio_console.c
31625@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31626 * virtqueue, so we let the drivers do some boutique early-output thing. */
31627 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31628 {
31629- virtio_cons.put_chars = put_chars;
31630+ pax_open_kernel();
31631+ *(void **)&virtio_cons.put_chars = put_chars;
31632+ pax_close_kernel();
31633 return hvc_instantiate(0, 0, &virtio_cons);
31634 }
31635
31636@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31637 out_vq = vqs[1];
31638
31639 /* Start using the new console output. */
31640- virtio_cons.get_chars = get_chars;
31641- virtio_cons.put_chars = put_chars;
31642- virtio_cons.notifier_add = notifier_add_vio;
31643- virtio_cons.notifier_del = notifier_del_vio;
31644- virtio_cons.notifier_hangup = notifier_del_vio;
31645+ pax_open_kernel();
31646+ *(void **)&virtio_cons.get_chars = get_chars;
31647+ *(void **)&virtio_cons.put_chars = put_chars;
31648+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31649+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31650+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31651+ pax_close_kernel();
31652
31653 /* The first argument of hvc_alloc() is the virtual console number, so
31654 * we use zero. The second argument is the parameter for the
31655diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31656index 0c80c68..53d59c1 100644
31657--- a/drivers/char/vt.c
31658+++ b/drivers/char/vt.c
31659@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31660
31661 static void notify_write(struct vc_data *vc, unsigned int unicode)
31662 {
31663- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31664+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
31665 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31666 }
31667
31668diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31669index 6351a26..999af95 100644
31670--- a/drivers/char/vt_ioctl.c
31671+++ b/drivers/char/vt_ioctl.c
31672@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31673 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31674 return -EFAULT;
31675
31676- if (!capable(CAP_SYS_TTY_CONFIG))
31677- perm = 0;
31678-
31679 switch (cmd) {
31680 case KDGKBENT:
31681 key_map = key_maps[s];
31682@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31683 val = (i ? K_HOLE : K_NOSUCHMAP);
31684 return put_user(val, &user_kbe->kb_value);
31685 case KDSKBENT:
31686+ if (!capable(CAP_SYS_TTY_CONFIG))
31687+ perm = 0;
31688+
31689 if (!perm)
31690 return -EPERM;
31691+
31692 if (!i && v == K_NOSUCHMAP) {
31693 /* deallocate map */
31694 key_map = key_maps[s];
31695@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31696 int i, j, k;
31697 int ret;
31698
31699- if (!capable(CAP_SYS_TTY_CONFIG))
31700- perm = 0;
31701-
31702 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
31703 if (!kbs) {
31704 ret = -ENOMEM;
31705@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31706 kfree(kbs);
31707 return ((p && *p) ? -EOVERFLOW : 0);
31708 case KDSKBSENT:
31709+ if (!capable(CAP_SYS_TTY_CONFIG))
31710+ perm = 0;
31711+
31712 if (!perm) {
31713 ret = -EPERM;
31714 goto reterr;
31715diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
31716index c7ae026..1769c1d 100644
31717--- a/drivers/cpufreq/cpufreq.c
31718+++ b/drivers/cpufreq/cpufreq.c
31719@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
31720 complete(&policy->kobj_unregister);
31721 }
31722
31723-static struct sysfs_ops sysfs_ops = {
31724+static const struct sysfs_ops sysfs_ops = {
31725 .show = show,
31726 .store = store,
31727 };
31728diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
31729index 97b0038..2056670 100644
31730--- a/drivers/cpuidle/sysfs.c
31731+++ b/drivers/cpuidle/sysfs.c
31732@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
31733 return ret;
31734 }
31735
31736-static struct sysfs_ops cpuidle_sysfs_ops = {
31737+static const struct sysfs_ops cpuidle_sysfs_ops = {
31738 .show = cpuidle_show,
31739 .store = cpuidle_store,
31740 };
31741@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
31742 return ret;
31743 }
31744
31745-static struct sysfs_ops cpuidle_state_sysfs_ops = {
31746+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
31747 .show = cpuidle_state_show,
31748 };
31749
31750@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
31751 .release = cpuidle_state_sysfs_release,
31752 };
31753
31754-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31755+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31756 {
31757 kobject_put(&device->kobjs[i]->kobj);
31758 wait_for_completion(&device->kobjs[i]->kobj_unregister);
31759diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
31760index 5f753fc..0377ae9 100644
31761--- a/drivers/crypto/hifn_795x.c
31762+++ b/drivers/crypto/hifn_795x.c
31763@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
31764 0xCA, 0x34, 0x2B, 0x2E};
31765 struct scatterlist sg;
31766
31767+ pax_track_stack();
31768+
31769 memset(src, 0, sizeof(src));
31770 memset(ctx.key, 0, sizeof(ctx.key));
31771
31772diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
31773index 71e6482..de8d96c 100644
31774--- a/drivers/crypto/padlock-aes.c
31775+++ b/drivers/crypto/padlock-aes.c
31776@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
31777 struct crypto_aes_ctx gen_aes;
31778 int cpu;
31779
31780+ pax_track_stack();
31781+
31782 if (key_len % 8) {
31783 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
31784 return -EINVAL;
31785diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
31786index dcc4ab7..cc834bb 100644
31787--- a/drivers/dma/ioat/dma.c
31788+++ b/drivers/dma/ioat/dma.c
31789@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
31790 return entry->show(&chan->common, page);
31791 }
31792
31793-struct sysfs_ops ioat_sysfs_ops = {
31794+const struct sysfs_ops ioat_sysfs_ops = {
31795 .show = ioat_attr_show,
31796 };
31797
31798diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
31799index bbc3e78..f2db62c 100644
31800--- a/drivers/dma/ioat/dma.h
31801+++ b/drivers/dma/ioat/dma.h
31802@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
31803 unsigned long *phys_complete);
31804 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
31805 void ioat_kobject_del(struct ioatdma_device *device);
31806-extern struct sysfs_ops ioat_sysfs_ops;
31807+extern const struct sysfs_ops ioat_sysfs_ops;
31808 extern struct ioat_sysfs_entry ioat_version_attr;
31809 extern struct ioat_sysfs_entry ioat_cap_attr;
31810 #endif /* IOATDMA_H */
31811diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
31812index 9908c9e..3ceb0e5 100644
31813--- a/drivers/dma/ioat/dma_v3.c
31814+++ b/drivers/dma/ioat/dma_v3.c
31815@@ -71,10 +71,10 @@
31816 /* provide a lookup table for setting the source address in the base or
31817 * extended descriptor of an xor or pq descriptor
31818 */
31819-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
31820-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
31821-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
31822-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
31823+static const u8 xor_idx_to_desc = 0xd0;
31824+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
31825+static const u8 pq_idx_to_desc = 0xf8;
31826+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
31827
31828 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
31829 {
31830diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
31831index 85c464a..afd1e73 100644
31832--- a/drivers/edac/amd64_edac.c
31833+++ b/drivers/edac/amd64_edac.c
31834@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
31835 * PCI core identifies what devices are on a system during boot, and then
31836 * inquiry this table to see if this driver is for a given device found.
31837 */
31838-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
31839+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
31840 {
31841 .vendor = PCI_VENDOR_ID_AMD,
31842 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
31843diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
31844index 2b95f1a..4f52793 100644
31845--- a/drivers/edac/amd76x_edac.c
31846+++ b/drivers/edac/amd76x_edac.c
31847@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
31848 edac_mc_free(mci);
31849 }
31850
31851-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
31852+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
31853 {
31854 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31855 AMD762},
31856diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
31857index d205d49..74c9672 100644
31858--- a/drivers/edac/e752x_edac.c
31859+++ b/drivers/edac/e752x_edac.c
31860@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
31861 edac_mc_free(mci);
31862 }
31863
31864-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
31865+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
31866 {
31867 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31868 E7520},
31869diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
31870index c7d11cc..c59c1ca 100644
31871--- a/drivers/edac/e7xxx_edac.c
31872+++ b/drivers/edac/e7xxx_edac.c
31873@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
31874 edac_mc_free(mci);
31875 }
31876
31877-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
31878+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
31879 {
31880 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31881 E7205},
31882diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
31883index 5376457..5fdedbc 100644
31884--- a/drivers/edac/edac_device_sysfs.c
31885+++ b/drivers/edac/edac_device_sysfs.c
31886@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
31887 }
31888
31889 /* edac_dev file operations for an 'ctl_info' */
31890-static struct sysfs_ops device_ctl_info_ops = {
31891+static const struct sysfs_ops device_ctl_info_ops = {
31892 .show = edac_dev_ctl_info_show,
31893 .store = edac_dev_ctl_info_store
31894 };
31895@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
31896 }
31897
31898 /* edac_dev file operations for an 'instance' */
31899-static struct sysfs_ops device_instance_ops = {
31900+static const struct sysfs_ops device_instance_ops = {
31901 .show = edac_dev_instance_show,
31902 .store = edac_dev_instance_store
31903 };
31904@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
31905 }
31906
31907 /* edac_dev file operations for a 'block' */
31908-static struct sysfs_ops device_block_ops = {
31909+static const struct sysfs_ops device_block_ops = {
31910 .show = edac_dev_block_show,
31911 .store = edac_dev_block_store
31912 };
31913diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
31914index e1d4ce0..88840e9 100644
31915--- a/drivers/edac/edac_mc_sysfs.c
31916+++ b/drivers/edac/edac_mc_sysfs.c
31917@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
31918 return -EIO;
31919 }
31920
31921-static struct sysfs_ops csrowfs_ops = {
31922+static const struct sysfs_ops csrowfs_ops = {
31923 .show = csrowdev_show,
31924 .store = csrowdev_store
31925 };
31926@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
31927 }
31928
31929 /* Intermediate show/store table */
31930-static struct sysfs_ops mci_ops = {
31931+static const struct sysfs_ops mci_ops = {
31932 .show = mcidev_show,
31933 .store = mcidev_store
31934 };
31935diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
31936index 422728c..d8d9c88 100644
31937--- a/drivers/edac/edac_pci_sysfs.c
31938+++ b/drivers/edac/edac_pci_sysfs.c
31939@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
31940 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
31941 static int edac_pci_poll_msec = 1000; /* one second workq period */
31942
31943-static atomic_t pci_parity_count = ATOMIC_INIT(0);
31944-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
31945+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
31946+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
31947
31948 static struct kobject *edac_pci_top_main_kobj;
31949 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
31950@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
31951 }
31952
31953 /* fs_ops table */
31954-static struct sysfs_ops pci_instance_ops = {
31955+static const struct sysfs_ops pci_instance_ops = {
31956 .show = edac_pci_instance_show,
31957 .store = edac_pci_instance_store
31958 };
31959@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
31960 return -EIO;
31961 }
31962
31963-static struct sysfs_ops edac_pci_sysfs_ops = {
31964+static const struct sysfs_ops edac_pci_sysfs_ops = {
31965 .show = edac_pci_dev_show,
31966 .store = edac_pci_dev_store
31967 };
31968@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31969 edac_printk(KERN_CRIT, EDAC_PCI,
31970 "Signaled System Error on %s\n",
31971 pci_name(dev));
31972- atomic_inc(&pci_nonparity_count);
31973+ atomic_inc_unchecked(&pci_nonparity_count);
31974 }
31975
31976 if (status & (PCI_STATUS_PARITY)) {
31977@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31978 "Master Data Parity Error on %s\n",
31979 pci_name(dev));
31980
31981- atomic_inc(&pci_parity_count);
31982+ atomic_inc_unchecked(&pci_parity_count);
31983 }
31984
31985 if (status & (PCI_STATUS_DETECTED_PARITY)) {
31986@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31987 "Detected Parity Error on %s\n",
31988 pci_name(dev));
31989
31990- atomic_inc(&pci_parity_count);
31991+ atomic_inc_unchecked(&pci_parity_count);
31992 }
31993 }
31994
31995@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31996 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
31997 "Signaled System Error on %s\n",
31998 pci_name(dev));
31999- atomic_inc(&pci_nonparity_count);
32000+ atomic_inc_unchecked(&pci_nonparity_count);
32001 }
32002
32003 if (status & (PCI_STATUS_PARITY)) {
32004@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32005 "Master Data Parity Error on "
32006 "%s\n", pci_name(dev));
32007
32008- atomic_inc(&pci_parity_count);
32009+ atomic_inc_unchecked(&pci_parity_count);
32010 }
32011
32012 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32013@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32014 "Detected Parity Error on %s\n",
32015 pci_name(dev));
32016
32017- atomic_inc(&pci_parity_count);
32018+ atomic_inc_unchecked(&pci_parity_count);
32019 }
32020 }
32021 }
32022@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32023 if (!check_pci_errors)
32024 return;
32025
32026- before_count = atomic_read(&pci_parity_count);
32027+ before_count = atomic_read_unchecked(&pci_parity_count);
32028
32029 /* scan all PCI devices looking for a Parity Error on devices and
32030 * bridges.
32031@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32032 /* Only if operator has selected panic on PCI Error */
32033 if (edac_pci_get_panic_on_pe()) {
32034 /* If the count is different 'after' from 'before' */
32035- if (before_count != atomic_read(&pci_parity_count))
32036+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32037 panic("EDAC: PCI Parity Error");
32038 }
32039 }
32040diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32041index 6c9a0f2..9c1cf7e 100644
32042--- a/drivers/edac/i3000_edac.c
32043+++ b/drivers/edac/i3000_edac.c
32044@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32045 edac_mc_free(mci);
32046 }
32047
32048-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32049+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32050 {
32051 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32052 I3000},
32053diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32054index fde4db9..fe108f9 100644
32055--- a/drivers/edac/i3200_edac.c
32056+++ b/drivers/edac/i3200_edac.c
32057@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32058 edac_mc_free(mci);
32059 }
32060
32061-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32062+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32063 {
32064 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32065 I3200},
32066diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32067index adc10a2..57d4ccf 100644
32068--- a/drivers/edac/i5000_edac.c
32069+++ b/drivers/edac/i5000_edac.c
32070@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32071 *
32072 * The "E500P" device is the first device supported.
32073 */
32074-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32075+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32076 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32077 .driver_data = I5000P},
32078
32079diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32080index 22db05a..b2b5503 100644
32081--- a/drivers/edac/i5100_edac.c
32082+++ b/drivers/edac/i5100_edac.c
32083@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32084 edac_mc_free(mci);
32085 }
32086
32087-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32088+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32089 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32090 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32091 { 0, }
32092diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32093index f99d106..f050710 100644
32094--- a/drivers/edac/i5400_edac.c
32095+++ b/drivers/edac/i5400_edac.c
32096@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32097 *
32098 * The "E500P" device is the first device supported.
32099 */
32100-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32101+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32102 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32103 {0,} /* 0 terminated list. */
32104 };
32105diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32106index 577760a..9ce16ce 100644
32107--- a/drivers/edac/i82443bxgx_edac.c
32108+++ b/drivers/edac/i82443bxgx_edac.c
32109@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32110
32111 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32112
32113-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32114+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32115 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32116 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32117 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32118diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32119index c0088ba..64a7b98 100644
32120--- a/drivers/edac/i82860_edac.c
32121+++ b/drivers/edac/i82860_edac.c
32122@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32123 edac_mc_free(mci);
32124 }
32125
32126-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32127+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32128 {
32129 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32130 I82860},
32131diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32132index b2d83b9..a34357b 100644
32133--- a/drivers/edac/i82875p_edac.c
32134+++ b/drivers/edac/i82875p_edac.c
32135@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32136 edac_mc_free(mci);
32137 }
32138
32139-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32140+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32141 {
32142 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32143 I82875P},
32144diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32145index 2eed3ea..87bbbd1 100644
32146--- a/drivers/edac/i82975x_edac.c
32147+++ b/drivers/edac/i82975x_edac.c
32148@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32149 edac_mc_free(mci);
32150 }
32151
32152-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32153+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32154 {
32155 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32156 I82975X
32157diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32158index 9900675..78ac2b6 100644
32159--- a/drivers/edac/r82600_edac.c
32160+++ b/drivers/edac/r82600_edac.c
32161@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32162 edac_mc_free(mci);
32163 }
32164
32165-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32166+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32167 {
32168 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32169 },
32170diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32171index d4ec605..4cfec4e 100644
32172--- a/drivers/edac/x38_edac.c
32173+++ b/drivers/edac/x38_edac.c
32174@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32175 edac_mc_free(mci);
32176 }
32177
32178-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32179+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32180 {
32181 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32182 X38},
32183diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32184index 3fc2ceb..daf098f 100644
32185--- a/drivers/firewire/core-card.c
32186+++ b/drivers/firewire/core-card.c
32187@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32188
32189 void fw_core_remove_card(struct fw_card *card)
32190 {
32191- struct fw_card_driver dummy_driver = dummy_driver_template;
32192+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32193
32194 card->driver->update_phy_reg(card, 4,
32195 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32196diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32197index 4560d8f..36db24a 100644
32198--- a/drivers/firewire/core-cdev.c
32199+++ b/drivers/firewire/core-cdev.c
32200@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32201 int ret;
32202
32203 if ((request->channels == 0 && request->bandwidth == 0) ||
32204- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32205- request->bandwidth < 0)
32206+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32207 return -EINVAL;
32208
32209 r = kmalloc(sizeof(*r), GFP_KERNEL);
32210diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32211index da628c7..cf54a2c 100644
32212--- a/drivers/firewire/core-transaction.c
32213+++ b/drivers/firewire/core-transaction.c
32214@@ -36,6 +36,7 @@
32215 #include <linux/string.h>
32216 #include <linux/timer.h>
32217 #include <linux/types.h>
32218+#include <linux/sched.h>
32219
32220 #include <asm/byteorder.h>
32221
32222@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32223 struct transaction_callback_data d;
32224 struct fw_transaction t;
32225
32226+ pax_track_stack();
32227+
32228 init_completion(&d.done);
32229 d.payload = payload;
32230 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32231diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32232index 7ff6e75..a2965d9 100644
32233--- a/drivers/firewire/core.h
32234+++ b/drivers/firewire/core.h
32235@@ -86,6 +86,7 @@ struct fw_card_driver {
32236
32237 int (*stop_iso)(struct fw_iso_context *ctx);
32238 };
32239+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32240
32241 void fw_card_initialize(struct fw_card *card,
32242 const struct fw_card_driver *driver, struct device *device);
32243diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32244index 3a2ccb0..82fd7c4 100644
32245--- a/drivers/firmware/dmi_scan.c
32246+++ b/drivers/firmware/dmi_scan.c
32247@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32248 }
32249 }
32250 else {
32251- /*
32252- * no iounmap() for that ioremap(); it would be a no-op, but
32253- * it's so early in setup that sucker gets confused into doing
32254- * what it shouldn't if we actually call it.
32255- */
32256 p = dmi_ioremap(0xF0000, 0x10000);
32257 if (p == NULL)
32258 goto error;
32259@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32260 if (buf == NULL)
32261 return -1;
32262
32263- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32264+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32265
32266 iounmap(buf);
32267 return 0;
32268diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32269index 9e4f59d..110e24e 100644
32270--- a/drivers/firmware/edd.c
32271+++ b/drivers/firmware/edd.c
32272@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32273 return ret;
32274 }
32275
32276-static struct sysfs_ops edd_attr_ops = {
32277+static const struct sysfs_ops edd_attr_ops = {
32278 .show = edd_attr_show,
32279 };
32280
32281diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32282index f4f709d..082f06e 100644
32283--- a/drivers/firmware/efivars.c
32284+++ b/drivers/firmware/efivars.c
32285@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32286 return ret;
32287 }
32288
32289-static struct sysfs_ops efivar_attr_ops = {
32290+static const struct sysfs_ops efivar_attr_ops = {
32291 .show = efivar_attr_show,
32292 .store = efivar_attr_store,
32293 };
32294diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32295index 051d1eb..0a5d4e7 100644
32296--- a/drivers/firmware/iscsi_ibft.c
32297+++ b/drivers/firmware/iscsi_ibft.c
32298@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32299 return ret;
32300 }
32301
32302-static struct sysfs_ops ibft_attr_ops = {
32303+static const struct sysfs_ops ibft_attr_ops = {
32304 .show = ibft_show_attribute,
32305 };
32306
32307diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32308index 56f9234..8c58c7b 100644
32309--- a/drivers/firmware/memmap.c
32310+++ b/drivers/firmware/memmap.c
32311@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32312 NULL
32313 };
32314
32315-static struct sysfs_ops memmap_attr_ops = {
32316+static const struct sysfs_ops memmap_attr_ops = {
32317 .show = memmap_attr_show,
32318 };
32319
32320diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32321index b16c9a8..2af7d3f 100644
32322--- a/drivers/gpio/vr41xx_giu.c
32323+++ b/drivers/gpio/vr41xx_giu.c
32324@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32325 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32326 maskl, pendl, maskh, pendh);
32327
32328- atomic_inc(&irq_err_count);
32329+ atomic_inc_unchecked(&irq_err_count);
32330
32331 return -EINVAL;
32332 }
32333diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32334index bea6efc..3dc0f42 100644
32335--- a/drivers/gpu/drm/drm_crtc.c
32336+++ b/drivers/gpu/drm/drm_crtc.c
32337@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32338 */
32339 if ((out_resp->count_modes >= mode_count) && mode_count) {
32340 copied = 0;
32341- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32342+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32343 list_for_each_entry(mode, &connector->modes, head) {
32344 drm_crtc_convert_to_umode(&u_mode, mode);
32345 if (copy_to_user(mode_ptr + copied,
32346@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32347
32348 if ((out_resp->count_props >= props_count) && props_count) {
32349 copied = 0;
32350- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32351- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32352+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32353+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32354 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32355 if (connector->property_ids[i] != 0) {
32356 if (put_user(connector->property_ids[i],
32357@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32358
32359 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32360 copied = 0;
32361- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32362+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32363 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32364 if (connector->encoder_ids[i] != 0) {
32365 if (put_user(connector->encoder_ids[i],
32366@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32367 }
32368
32369 for (i = 0; i < crtc_req->count_connectors; i++) {
32370- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32371+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32372 if (get_user(out_id, &set_connectors_ptr[i])) {
32373 ret = -EFAULT;
32374 goto out;
32375@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32376 out_resp->flags = property->flags;
32377
32378 if ((out_resp->count_values >= value_count) && value_count) {
32379- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32380+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32381 for (i = 0; i < value_count; i++) {
32382 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32383 ret = -EFAULT;
32384@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32385 if (property->flags & DRM_MODE_PROP_ENUM) {
32386 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32387 copied = 0;
32388- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32389+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32390 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32391
32392 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32393@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32394 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32395 copied = 0;
32396 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32397- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32398+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32399
32400 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32401 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32402@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32403 blob = obj_to_blob(obj);
32404
32405 if (out_resp->length == blob->length) {
32406- blob_ptr = (void *)(unsigned long)out_resp->data;
32407+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32408 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32409 ret = -EFAULT;
32410 goto done;
32411diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32412index 1b8745d..92fdbf6 100644
32413--- a/drivers/gpu/drm/drm_crtc_helper.c
32414+++ b/drivers/gpu/drm/drm_crtc_helper.c
32415@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32416 struct drm_crtc *tmp;
32417 int crtc_mask = 1;
32418
32419- WARN(!crtc, "checking null crtc?");
32420+ BUG_ON(!crtc);
32421
32422 dev = crtc->dev;
32423
32424@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32425
32426 adjusted_mode = drm_mode_duplicate(dev, mode);
32427
32428+ pax_track_stack();
32429+
32430 crtc->enabled = drm_helper_crtc_in_use(crtc);
32431
32432 if (!crtc->enabled)
32433diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32434index 0e27d98..dec8768 100644
32435--- a/drivers/gpu/drm/drm_drv.c
32436+++ b/drivers/gpu/drm/drm_drv.c
32437@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32438 char *kdata = NULL;
32439
32440 atomic_inc(&dev->ioctl_count);
32441- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32442+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32443 ++file_priv->ioctl_count;
32444
32445 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32446diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32447index ba14553..182d0bb 100644
32448--- a/drivers/gpu/drm/drm_fops.c
32449+++ b/drivers/gpu/drm/drm_fops.c
32450@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32451 }
32452
32453 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32454- atomic_set(&dev->counts[i], 0);
32455+ atomic_set_unchecked(&dev->counts[i], 0);
32456
32457 dev->sigdata.lock = NULL;
32458
32459@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32460
32461 retcode = drm_open_helper(inode, filp, dev);
32462 if (!retcode) {
32463- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32464+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32465 spin_lock(&dev->count_lock);
32466- if (!dev->open_count++) {
32467+ if (local_inc_return(&dev->open_count) == 1) {
32468 spin_unlock(&dev->count_lock);
32469 retcode = drm_setup(dev);
32470 goto out;
32471@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32472
32473 lock_kernel();
32474
32475- DRM_DEBUG("open_count = %d\n", dev->open_count);
32476+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32477
32478 if (dev->driver->preclose)
32479 dev->driver->preclose(dev, file_priv);
32480@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32481 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32482 task_pid_nr(current),
32483 (long)old_encode_dev(file_priv->minor->device),
32484- dev->open_count);
32485+ local_read(&dev->open_count));
32486
32487 /* if the master has gone away we can't do anything with the lock */
32488 if (file_priv->minor->master)
32489@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, struct file *filp)
32490 * End inline drm_release
32491 */
32492
32493- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32494+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32495 spin_lock(&dev->count_lock);
32496- if (!--dev->open_count) {
32497+ if (local_dec_and_test(&dev->open_count)) {
32498 if (atomic_read(&dev->ioctl_count)) {
32499 DRM_ERROR("Device busy: %d\n",
32500 atomic_read(&dev->ioctl_count));
32501diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32502index 8bf3770..7942280 100644
32503--- a/drivers/gpu/drm/drm_gem.c
32504+++ b/drivers/gpu/drm/drm_gem.c
32505@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32506 spin_lock_init(&dev->object_name_lock);
32507 idr_init(&dev->object_name_idr);
32508 atomic_set(&dev->object_count, 0);
32509- atomic_set(&dev->object_memory, 0);
32510+ atomic_set_unchecked(&dev->object_memory, 0);
32511 atomic_set(&dev->pin_count, 0);
32512- atomic_set(&dev->pin_memory, 0);
32513+ atomic_set_unchecked(&dev->pin_memory, 0);
32514 atomic_set(&dev->gtt_count, 0);
32515- atomic_set(&dev->gtt_memory, 0);
32516+ atomic_set_unchecked(&dev->gtt_memory, 0);
32517
32518 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32519 if (!mm) {
32520@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32521 goto fput;
32522 }
32523 atomic_inc(&dev->object_count);
32524- atomic_add(obj->size, &dev->object_memory);
32525+ atomic_add_unchecked(obj->size, &dev->object_memory);
32526 return obj;
32527 fput:
32528 fput(obj->filp);
32529@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32530
32531 fput(obj->filp);
32532 atomic_dec(&dev->object_count);
32533- atomic_sub(obj->size, &dev->object_memory);
32534+ atomic_sub_unchecked(obj->size, &dev->object_memory);
32535 kfree(obj);
32536 }
32537 EXPORT_SYMBOL(drm_gem_object_free);
32538diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32539index f0f6c6b..34af322 100644
32540--- a/drivers/gpu/drm/drm_info.c
32541+++ b/drivers/gpu/drm/drm_info.c
32542@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32543 struct drm_local_map *map;
32544 struct drm_map_list *r_list;
32545
32546- /* Hardcoded from _DRM_FRAME_BUFFER,
32547- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32548- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32549- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32550+ static const char * const types[] = {
32551+ [_DRM_FRAME_BUFFER] = "FB",
32552+ [_DRM_REGISTERS] = "REG",
32553+ [_DRM_SHM] = "SHM",
32554+ [_DRM_AGP] = "AGP",
32555+ [_DRM_SCATTER_GATHER] = "SG",
32556+ [_DRM_CONSISTENT] = "PCI",
32557+ [_DRM_GEM] = "GEM" };
32558 const char *type;
32559 int i;
32560
32561@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32562 map = r_list->map;
32563 if (!map)
32564 continue;
32565- if (map->type < 0 || map->type > 5)
32566+ if (map->type >= ARRAY_SIZE(types))
32567 type = "??";
32568 else
32569 type = types[map->type];
32570@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32571 struct drm_device *dev = node->minor->dev;
32572
32573 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32574- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32575+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32576 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32577- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32578- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32579+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32580+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32581 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32582 return 0;
32583 }
32584@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32585 mutex_lock(&dev->struct_mutex);
32586 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32587 atomic_read(&dev->vma_count),
32588+#ifdef CONFIG_GRKERNSEC_HIDESYM
32589+ NULL, 0);
32590+#else
32591 high_memory, (u64)virt_to_phys(high_memory));
32592+#endif
32593
32594 list_for_each_entry(pt, &dev->vmalist, head) {
32595 vma = pt->vma;
32596@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32597 continue;
32598 seq_printf(m,
32599 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32600- pt->pid, vma->vm_start, vma->vm_end,
32601+ pt->pid,
32602+#ifdef CONFIG_GRKERNSEC_HIDESYM
32603+ 0, 0,
32604+#else
32605+ vma->vm_start, vma->vm_end,
32606+#endif
32607 vma->vm_flags & VM_READ ? 'r' : '-',
32608 vma->vm_flags & VM_WRITE ? 'w' : '-',
32609 vma->vm_flags & VM_EXEC ? 'x' : '-',
32610 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32611 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32612 vma->vm_flags & VM_IO ? 'i' : '-',
32613+#ifdef CONFIG_GRKERNSEC_HIDESYM
32614+ 0);
32615+#else
32616 vma->vm_pgoff);
32617+#endif
32618
32619 #if defined(__i386__)
32620 pgprot = pgprot_val(vma->vm_page_prot);
32621diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32622index 282d9fd..71e5f11 100644
32623--- a/drivers/gpu/drm/drm_ioc32.c
32624+++ b/drivers/gpu/drm/drm_ioc32.c
32625@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32626 request = compat_alloc_user_space(nbytes);
32627 if (!access_ok(VERIFY_WRITE, request, nbytes))
32628 return -EFAULT;
32629- list = (struct drm_buf_desc *) (request + 1);
32630+ list = (struct drm_buf_desc __user *) (request + 1);
32631
32632 if (__put_user(count, &request->count)
32633 || __put_user(list, &request->list))
32634@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32635 request = compat_alloc_user_space(nbytes);
32636 if (!access_ok(VERIFY_WRITE, request, nbytes))
32637 return -EFAULT;
32638- list = (struct drm_buf_pub *) (request + 1);
32639+ list = (struct drm_buf_pub __user *) (request + 1);
32640
32641 if (__put_user(count, &request->count)
32642 || __put_user(list, &request->list))
32643diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32644index 9b9ff46..4ea724c 100644
32645--- a/drivers/gpu/drm/drm_ioctl.c
32646+++ b/drivers/gpu/drm/drm_ioctl.c
32647@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32648 stats->data[i].value =
32649 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32650 else
32651- stats->data[i].value = atomic_read(&dev->counts[i]);
32652+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32653 stats->data[i].type = dev->types[i];
32654 }
32655
32656diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32657index e2f70a5..c703e86 100644
32658--- a/drivers/gpu/drm/drm_lock.c
32659+++ b/drivers/gpu/drm/drm_lock.c
32660@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32661 if (drm_lock_take(&master->lock, lock->context)) {
32662 master->lock.file_priv = file_priv;
32663 master->lock.lock_time = jiffies;
32664- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32665+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32666 break; /* Got lock */
32667 }
32668
32669@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32670 return -EINVAL;
32671 }
32672
32673- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32674+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32675
32676 /* kernel_context_switch isn't used by any of the x86 drm
32677 * modules but is required by the Sparc driver.
32678diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32679index 7d1d88c..b9131b2 100644
32680--- a/drivers/gpu/drm/i810/i810_dma.c
32681+++ b/drivers/gpu/drm/i810/i810_dma.c
32682@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32683 dma->buflist[vertex->idx],
32684 vertex->discard, vertex->used);
32685
32686- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32687- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32688+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32689+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32690 sarea_priv->last_enqueue = dev_priv->counter - 1;
32691 sarea_priv->last_dispatch = (int)hw_status[5];
32692
32693@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32694 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32695 mc->last_render);
32696
32697- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32698- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32699+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32700+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32701 sarea_priv->last_enqueue = dev_priv->counter - 1;
32702 sarea_priv->last_dispatch = (int)hw_status[5];
32703
32704diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32705index 21e2691..7321edd 100644
32706--- a/drivers/gpu/drm/i810/i810_drv.h
32707+++ b/drivers/gpu/drm/i810/i810_drv.h
32708@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32709 int page_flipping;
32710
32711 wait_queue_head_t irq_queue;
32712- atomic_t irq_received;
32713- atomic_t irq_emitted;
32714+ atomic_unchecked_t irq_received;
32715+ atomic_unchecked_t irq_emitted;
32716
32717 int front_offset;
32718 } drm_i810_private_t;
32719diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
32720index da82afe..48a45de 100644
32721--- a/drivers/gpu/drm/i830/i830_drv.h
32722+++ b/drivers/gpu/drm/i830/i830_drv.h
32723@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
32724 int page_flipping;
32725
32726 wait_queue_head_t irq_queue;
32727- atomic_t irq_received;
32728- atomic_t irq_emitted;
32729+ atomic_unchecked_t irq_received;
32730+ atomic_unchecked_t irq_emitted;
32731
32732 int use_mi_batchbuffer_start;
32733
32734diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
32735index 91ec2bb..6f21fab 100644
32736--- a/drivers/gpu/drm/i830/i830_irq.c
32737+++ b/drivers/gpu/drm/i830/i830_irq.c
32738@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
32739
32740 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
32741
32742- atomic_inc(&dev_priv->irq_received);
32743+ atomic_inc_unchecked(&dev_priv->irq_received);
32744 wake_up_interruptible(&dev_priv->irq_queue);
32745
32746 return IRQ_HANDLED;
32747@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
32748
32749 DRM_DEBUG("%s\n", __func__);
32750
32751- atomic_inc(&dev_priv->irq_emitted);
32752+ atomic_inc_unchecked(&dev_priv->irq_emitted);
32753
32754 BEGIN_LP_RING(2);
32755 OUT_RING(0);
32756 OUT_RING(GFX_OP_USER_INTERRUPT);
32757 ADVANCE_LP_RING();
32758
32759- return atomic_read(&dev_priv->irq_emitted);
32760+ return atomic_read_unchecked(&dev_priv->irq_emitted);
32761 }
32762
32763 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32764@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32765
32766 DRM_DEBUG("%s\n", __func__);
32767
32768- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32769+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32770 return 0;
32771
32772 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
32773@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32774
32775 for (;;) {
32776 __set_current_state(TASK_INTERRUPTIBLE);
32777- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32778+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32779 break;
32780 if ((signed)(end - jiffies) <= 0) {
32781 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
32782@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
32783 I830_WRITE16(I830REG_HWSTAM, 0xffff);
32784 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
32785 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
32786- atomic_set(&dev_priv->irq_received, 0);
32787- atomic_set(&dev_priv->irq_emitted, 0);
32788+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32789+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
32790 init_waitqueue_head(&dev_priv->irq_queue);
32791 }
32792
32793diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
32794index 288fc50..c6092055 100644
32795--- a/drivers/gpu/drm/i915/dvo.h
32796+++ b/drivers/gpu/drm/i915/dvo.h
32797@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
32798 *
32799 * \return singly-linked list of modes or NULL if no modes found.
32800 */
32801- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
32802+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
32803
32804 /**
32805 * Clean up driver-specific bits of the output
32806 */
32807- void (*destroy) (struct intel_dvo_device *dvo);
32808+ void (* const destroy) (struct intel_dvo_device *dvo);
32809
32810 /**
32811 * Debugging hook to dump device registers to log file
32812 */
32813- void (*dump_regs)(struct intel_dvo_device *dvo);
32814+ void (* const dump_regs)(struct intel_dvo_device *dvo);
32815 };
32816
32817-extern struct intel_dvo_dev_ops sil164_ops;
32818-extern struct intel_dvo_dev_ops ch7xxx_ops;
32819-extern struct intel_dvo_dev_ops ivch_ops;
32820-extern struct intel_dvo_dev_ops tfp410_ops;
32821-extern struct intel_dvo_dev_ops ch7017_ops;
32822+extern const struct intel_dvo_dev_ops sil164_ops;
32823+extern const struct intel_dvo_dev_ops ch7xxx_ops;
32824+extern const struct intel_dvo_dev_ops ivch_ops;
32825+extern const struct intel_dvo_dev_ops tfp410_ops;
32826+extern const struct intel_dvo_dev_ops ch7017_ops;
32827
32828 #endif /* _INTEL_DVO_H */
32829diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
32830index 621815b..499d82e 100644
32831--- a/drivers/gpu/drm/i915/dvo_ch7017.c
32832+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
32833@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
32834 }
32835 }
32836
32837-struct intel_dvo_dev_ops ch7017_ops = {
32838+const struct intel_dvo_dev_ops ch7017_ops = {
32839 .init = ch7017_init,
32840 .detect = ch7017_detect,
32841 .mode_valid = ch7017_mode_valid,
32842diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32843index a9b8962..ac769ba 100644
32844--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
32845+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32846@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
32847 }
32848 }
32849
32850-struct intel_dvo_dev_ops ch7xxx_ops = {
32851+const struct intel_dvo_dev_ops ch7xxx_ops = {
32852 .init = ch7xxx_init,
32853 .detect = ch7xxx_detect,
32854 .mode_valid = ch7xxx_mode_valid,
32855diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
32856index aa176f9..ed2930c 100644
32857--- a/drivers/gpu/drm/i915/dvo_ivch.c
32858+++ b/drivers/gpu/drm/i915/dvo_ivch.c
32859@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
32860 }
32861 }
32862
32863-struct intel_dvo_dev_ops ivch_ops= {
32864+const struct intel_dvo_dev_ops ivch_ops= {
32865 .init = ivch_init,
32866 .dpms = ivch_dpms,
32867 .save = ivch_save,
32868diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
32869index e1c1f73..7dbebcf 100644
32870--- a/drivers/gpu/drm/i915/dvo_sil164.c
32871+++ b/drivers/gpu/drm/i915/dvo_sil164.c
32872@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
32873 }
32874 }
32875
32876-struct intel_dvo_dev_ops sil164_ops = {
32877+const struct intel_dvo_dev_ops sil164_ops = {
32878 .init = sil164_init,
32879 .detect = sil164_detect,
32880 .mode_valid = sil164_mode_valid,
32881diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
32882index 16dce84..7e1b6f8 100644
32883--- a/drivers/gpu/drm/i915/dvo_tfp410.c
32884+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
32885@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
32886 }
32887 }
32888
32889-struct intel_dvo_dev_ops tfp410_ops = {
32890+const struct intel_dvo_dev_ops tfp410_ops = {
32891 .init = tfp410_init,
32892 .detect = tfp410_detect,
32893 .mode_valid = tfp410_mode_valid,
32894diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
32895index 7e859d6..7d1cf2b 100644
32896--- a/drivers/gpu/drm/i915/i915_debugfs.c
32897+++ b/drivers/gpu/drm/i915/i915_debugfs.c
32898@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
32899 I915_READ(GTIMR));
32900 }
32901 seq_printf(m, "Interrupts received: %d\n",
32902- atomic_read(&dev_priv->irq_received));
32903+ atomic_read_unchecked(&dev_priv->irq_received));
32904 if (dev_priv->hw_status_page != NULL) {
32905 seq_printf(m, "Current sequence: %d\n",
32906 i915_get_gem_seqno(dev));
32907diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
32908index 5449239..7e4f68d 100644
32909--- a/drivers/gpu/drm/i915/i915_drv.c
32910+++ b/drivers/gpu/drm/i915/i915_drv.c
32911@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
32912 return i915_resume(dev);
32913 }
32914
32915-static struct vm_operations_struct i915_gem_vm_ops = {
32916+static const struct vm_operations_struct i915_gem_vm_ops = {
32917 .fault = i915_gem_fault,
32918 .open = drm_gem_vm_open,
32919 .close = drm_gem_vm_close,
32920diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
32921index 97163f7..c24c7c7 100644
32922--- a/drivers/gpu/drm/i915/i915_drv.h
32923+++ b/drivers/gpu/drm/i915/i915_drv.h
32924@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
32925 /* display clock increase/decrease */
32926 /* pll clock increase/decrease */
32927 /* clock gating init */
32928-};
32929+} __no_const;
32930
32931 typedef struct drm_i915_private {
32932 struct drm_device *dev;
32933@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
32934 int page_flipping;
32935
32936 wait_queue_head_t irq_queue;
32937- atomic_t irq_received;
32938+ atomic_unchecked_t irq_received;
32939 /** Protects user_irq_refcount and irq_mask_reg */
32940 spinlock_t user_irq_lock;
32941 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
32942diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
32943index 27a3074..eb3f959 100644
32944--- a/drivers/gpu/drm/i915/i915_gem.c
32945+++ b/drivers/gpu/drm/i915/i915_gem.c
32946@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
32947
32948 args->aper_size = dev->gtt_total;
32949 args->aper_available_size = (args->aper_size -
32950- atomic_read(&dev->pin_memory));
32951+ atomic_read_unchecked(&dev->pin_memory));
32952
32953 return 0;
32954 }
32955@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
32956
32957 if (obj_priv->gtt_space) {
32958 atomic_dec(&dev->gtt_count);
32959- atomic_sub(obj->size, &dev->gtt_memory);
32960+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
32961
32962 drm_mm_put_block(obj_priv->gtt_space);
32963 obj_priv->gtt_space = NULL;
32964@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
32965 goto search_free;
32966 }
32967 atomic_inc(&dev->gtt_count);
32968- atomic_add(obj->size, &dev->gtt_memory);
32969+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
32970
32971 /* Assert that the object is not currently in any GPU domain. As it
32972 * wasn't in the GTT, there shouldn't be any way it could have been in
32973@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
32974 "%d/%d gtt bytes\n",
32975 atomic_read(&dev->object_count),
32976 atomic_read(&dev->pin_count),
32977- atomic_read(&dev->object_memory),
32978- atomic_read(&dev->pin_memory),
32979- atomic_read(&dev->gtt_memory),
32980+ atomic_read_unchecked(&dev->object_memory),
32981+ atomic_read_unchecked(&dev->pin_memory),
32982+ atomic_read_unchecked(&dev->gtt_memory),
32983 dev->gtt_total);
32984 }
32985 goto err;
32986@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
32987 */
32988 if (obj_priv->pin_count == 1) {
32989 atomic_inc(&dev->pin_count);
32990- atomic_add(obj->size, &dev->pin_memory);
32991+ atomic_add_unchecked(obj->size, &dev->pin_memory);
32992 if (!obj_priv->active &&
32993 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
32994 !list_empty(&obj_priv->list))
32995@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
32996 list_move_tail(&obj_priv->list,
32997 &dev_priv->mm.inactive_list);
32998 atomic_dec(&dev->pin_count);
32999- atomic_sub(obj->size, &dev->pin_memory);
33000+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33001 }
33002 i915_verify_inactive(dev, __FILE__, __LINE__);
33003 }
33004diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33005index 63f28ad..f5469da 100644
33006--- a/drivers/gpu/drm/i915/i915_irq.c
33007+++ b/drivers/gpu/drm/i915/i915_irq.c
33008@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33009 int irq_received;
33010 int ret = IRQ_NONE;
33011
33012- atomic_inc(&dev_priv->irq_received);
33013+ atomic_inc_unchecked(&dev_priv->irq_received);
33014
33015 if (IS_IGDNG(dev))
33016 return igdng_irq_handler(dev);
33017@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33018 {
33019 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33020
33021- atomic_set(&dev_priv->irq_received, 0);
33022+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33023
33024 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33025 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33026diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33027index 5d9c6a7..d1b0e29 100644
33028--- a/drivers/gpu/drm/i915/intel_sdvo.c
33029+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33030@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33031 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33032
33033 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33034- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33035+ pax_open_kernel();
33036+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33037+ pax_close_kernel();
33038
33039 /* Read the regs to test if we can talk to the device */
33040 for (i = 0; i < 0x40; i++) {
33041diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33042index be6c6b9..8615d9c 100644
33043--- a/drivers/gpu/drm/mga/mga_drv.h
33044+++ b/drivers/gpu/drm/mga/mga_drv.h
33045@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33046 u32 clear_cmd;
33047 u32 maccess;
33048
33049- atomic_t vbl_received; /**< Number of vblanks received. */
33050+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33051 wait_queue_head_t fence_queue;
33052- atomic_t last_fence_retired;
33053+ atomic_unchecked_t last_fence_retired;
33054 u32 next_fence_to_post;
33055
33056 unsigned int fb_cpp;
33057diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33058index daa6041..a28a5da 100644
33059--- a/drivers/gpu/drm/mga/mga_irq.c
33060+++ b/drivers/gpu/drm/mga/mga_irq.c
33061@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33062 if (crtc != 0)
33063 return 0;
33064
33065- return atomic_read(&dev_priv->vbl_received);
33066+ return atomic_read_unchecked(&dev_priv->vbl_received);
33067 }
33068
33069
33070@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33071 /* VBLANK interrupt */
33072 if (status & MGA_VLINEPEN) {
33073 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33074- atomic_inc(&dev_priv->vbl_received);
33075+ atomic_inc_unchecked(&dev_priv->vbl_received);
33076 drm_handle_vblank(dev, 0);
33077 handled = 1;
33078 }
33079@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33080 MGA_WRITE(MGA_PRIMEND, prim_end);
33081 }
33082
33083- atomic_inc(&dev_priv->last_fence_retired);
33084+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33085 DRM_WAKEUP(&dev_priv->fence_queue);
33086 handled = 1;
33087 }
33088@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33089 * using fences.
33090 */
33091 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33092- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33093+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33094 - *sequence) <= (1 << 23)));
33095
33096 *sequence = cur_fence;
33097diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33098index 4c39a40..b22a9ea 100644
33099--- a/drivers/gpu/drm/r128/r128_cce.c
33100+++ b/drivers/gpu/drm/r128/r128_cce.c
33101@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33102
33103 /* GH: Simple idle check.
33104 */
33105- atomic_set(&dev_priv->idle_count, 0);
33106+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33107
33108 /* We don't support anything other than bus-mastering ring mode,
33109 * but the ring can be in either AGP or PCI space for the ring
33110diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33111index 3c60829..4faf484 100644
33112--- a/drivers/gpu/drm/r128/r128_drv.h
33113+++ b/drivers/gpu/drm/r128/r128_drv.h
33114@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33115 int is_pci;
33116 unsigned long cce_buffers_offset;
33117
33118- atomic_t idle_count;
33119+ atomic_unchecked_t idle_count;
33120
33121 int page_flipping;
33122 int current_page;
33123 u32 crtc_offset;
33124 u32 crtc_offset_cntl;
33125
33126- atomic_t vbl_received;
33127+ atomic_unchecked_t vbl_received;
33128
33129 u32 color_fmt;
33130 unsigned int front_offset;
33131diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33132index 69810fb..97bf17a 100644
33133--- a/drivers/gpu/drm/r128/r128_irq.c
33134+++ b/drivers/gpu/drm/r128/r128_irq.c
33135@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33136 if (crtc != 0)
33137 return 0;
33138
33139- return atomic_read(&dev_priv->vbl_received);
33140+ return atomic_read_unchecked(&dev_priv->vbl_received);
33141 }
33142
33143 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33144@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33145 /* VBLANK interrupt */
33146 if (status & R128_CRTC_VBLANK_INT) {
33147 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33148- atomic_inc(&dev_priv->vbl_received);
33149+ atomic_inc_unchecked(&dev_priv->vbl_received);
33150 drm_handle_vblank(dev, 0);
33151 return IRQ_HANDLED;
33152 }
33153diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33154index af2665c..51922d2 100644
33155--- a/drivers/gpu/drm/r128/r128_state.c
33156+++ b/drivers/gpu/drm/r128/r128_state.c
33157@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33158
33159 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33160 {
33161- if (atomic_read(&dev_priv->idle_count) == 0) {
33162+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33163 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33164 } else {
33165- atomic_set(&dev_priv->idle_count, 0);
33166+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33167 }
33168 }
33169
33170diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33171index dd72b91..8644b3c 100644
33172--- a/drivers/gpu/drm/radeon/atom.c
33173+++ b/drivers/gpu/drm/radeon/atom.c
33174@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33175 char name[512];
33176 int i;
33177
33178+ pax_track_stack();
33179+
33180 ctx->card = card;
33181 ctx->bios = bios;
33182
33183diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33184index 0d79577..efaa7a5 100644
33185--- a/drivers/gpu/drm/radeon/mkregtable.c
33186+++ b/drivers/gpu/drm/radeon/mkregtable.c
33187@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33188 regex_t mask_rex;
33189 regmatch_t match[4];
33190 char buf[1024];
33191- size_t end;
33192+ long end;
33193 int len;
33194 int done = 0;
33195 int r;
33196 unsigned o;
33197 struct offset *offset;
33198 char last_reg_s[10];
33199- int last_reg;
33200+ unsigned long last_reg;
33201
33202 if (regcomp
33203 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33204diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33205index 6735213..38c2c67 100644
33206--- a/drivers/gpu/drm/radeon/radeon.h
33207+++ b/drivers/gpu/drm/radeon/radeon.h
33208@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33209 */
33210 struct radeon_fence_driver {
33211 uint32_t scratch_reg;
33212- atomic_t seq;
33213+ atomic_unchecked_t seq;
33214 uint32_t last_seq;
33215 unsigned long count_timeout;
33216 wait_queue_head_t queue;
33217@@ -640,7 +640,7 @@ struct radeon_asic {
33218 uint32_t offset, uint32_t obj_size);
33219 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33220 void (*bandwidth_update)(struct radeon_device *rdev);
33221-};
33222+} __no_const;
33223
33224 /*
33225 * Asic structures
33226diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33227index 4e928b9..d8b6008 100644
33228--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33229+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33230@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33231 bool linkb;
33232 struct radeon_i2c_bus_rec ddc_bus;
33233
33234+ pax_track_stack();
33235+
33236 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33237
33238 if (data_offset == 0)
33239@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33240 }
33241 }
33242
33243-struct bios_connector {
33244+static struct bios_connector {
33245 bool valid;
33246 uint16_t line_mux;
33247 uint16_t devices;
33248 int connector_type;
33249 struct radeon_i2c_bus_rec ddc_bus;
33250-};
33251+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33252
33253 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33254 drm_device
33255@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33256 uint8_t dac;
33257 union atom_supported_devices *supported_devices;
33258 int i, j;
33259- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33260
33261 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33262
33263diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33264index 083a181..ccccae0 100644
33265--- a/drivers/gpu/drm/radeon/radeon_display.c
33266+++ b/drivers/gpu/drm/radeon/radeon_display.c
33267@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33268
33269 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33270 error = freq - current_freq;
33271- error = error < 0 ? 0xffffffff : error;
33272+ error = (int32_t)error < 0 ? 0xffffffff : error;
33273 } else
33274 error = abs(current_freq - freq);
33275 vco_diff = abs(vco - best_vco);
33276diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33277index 76e4070..193fa7f 100644
33278--- a/drivers/gpu/drm/radeon/radeon_drv.h
33279+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33280@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33281
33282 /* SW interrupt */
33283 wait_queue_head_t swi_queue;
33284- atomic_t swi_emitted;
33285+ atomic_unchecked_t swi_emitted;
33286 int vblank_crtc;
33287 uint32_t irq_enable_reg;
33288 uint32_t r500_disp_irq_reg;
33289diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33290index 3beb26d..6ce9c4a 100644
33291--- a/drivers/gpu/drm/radeon/radeon_fence.c
33292+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33293@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33294 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33295 return 0;
33296 }
33297- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33298+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33299 if (!rdev->cp.ready) {
33300 /* FIXME: cp is not running assume everythings is done right
33301 * away
33302@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33303 return r;
33304 }
33305 WREG32(rdev->fence_drv.scratch_reg, 0);
33306- atomic_set(&rdev->fence_drv.seq, 0);
33307+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33308 INIT_LIST_HEAD(&rdev->fence_drv.created);
33309 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33310 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33311diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33312index a1bf11d..4a123c0 100644
33313--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33314+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33315@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33316 request = compat_alloc_user_space(sizeof(*request));
33317 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33318 || __put_user(req32.param, &request->param)
33319- || __put_user((void __user *)(unsigned long)req32.value,
33320+ || __put_user((unsigned long)req32.value,
33321 &request->value))
33322 return -EFAULT;
33323
33324diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33325index b79ecc4..8dab92d 100644
33326--- a/drivers/gpu/drm/radeon/radeon_irq.c
33327+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33328@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33329 unsigned int ret;
33330 RING_LOCALS;
33331
33332- atomic_inc(&dev_priv->swi_emitted);
33333- ret = atomic_read(&dev_priv->swi_emitted);
33334+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33335+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33336
33337 BEGIN_RING(4);
33338 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33339@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33340 drm_radeon_private_t *dev_priv =
33341 (drm_radeon_private_t *) dev->dev_private;
33342
33343- atomic_set(&dev_priv->swi_emitted, 0);
33344+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33345 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33346
33347 dev->max_vblank_count = 0x001fffff;
33348diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33349index 4747910..48ca4b3 100644
33350--- a/drivers/gpu/drm/radeon/radeon_state.c
33351+++ b/drivers/gpu/drm/radeon/radeon_state.c
33352@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33353 {
33354 drm_radeon_private_t *dev_priv = dev->dev_private;
33355 drm_radeon_getparam_t *param = data;
33356- int value;
33357+ int value = 0;
33358
33359 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33360
33361diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33362index 1381e06..0e53b17 100644
33363--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33364+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33365@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33366 DRM_INFO("radeon: ttm finalized\n");
33367 }
33368
33369-static struct vm_operations_struct radeon_ttm_vm_ops;
33370-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33371-
33372-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33373-{
33374- struct ttm_buffer_object *bo;
33375- int r;
33376-
33377- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33378- if (bo == NULL) {
33379- return VM_FAULT_NOPAGE;
33380- }
33381- r = ttm_vm_ops->fault(vma, vmf);
33382- return r;
33383-}
33384-
33385 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33386 {
33387 struct drm_file *file_priv;
33388 struct radeon_device *rdev;
33389- int r;
33390
33391 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33392 return drm_mmap(filp, vma);
33393@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33394
33395 file_priv = (struct drm_file *)filp->private_data;
33396 rdev = file_priv->minor->dev->dev_private;
33397- if (rdev == NULL) {
33398+ if (!rdev)
33399 return -EINVAL;
33400- }
33401- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33402- if (unlikely(r != 0)) {
33403- return r;
33404- }
33405- if (unlikely(ttm_vm_ops == NULL)) {
33406- ttm_vm_ops = vma->vm_ops;
33407- radeon_ttm_vm_ops = *ttm_vm_ops;
33408- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33409- }
33410- vma->vm_ops = &radeon_ttm_vm_ops;
33411- return 0;
33412+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33413 }
33414
33415
33416diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33417index b12ff76..0bd0c6e 100644
33418--- a/drivers/gpu/drm/radeon/rs690.c
33419+++ b/drivers/gpu/drm/radeon/rs690.c
33420@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33421 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33422 rdev->pm.sideport_bandwidth.full)
33423 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33424- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33425+ read_delay_latency.full = rfixed_const(800 * 1000);
33426 read_delay_latency.full = rfixed_div(read_delay_latency,
33427 rdev->pm.igp_sideport_mclk);
33428+ a.full = rfixed_const(370);
33429+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33430 } else {
33431 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33432 rdev->pm.k8_bandwidth.full)
33433diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33434index 0ed436e..e6e7ce3 100644
33435--- a/drivers/gpu/drm/ttm/ttm_bo.c
33436+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33437@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33438 NULL
33439 };
33440
33441-static struct sysfs_ops ttm_bo_global_ops = {
33442+static const struct sysfs_ops ttm_bo_global_ops = {
33443 .show = &ttm_bo_global_show
33444 };
33445
33446diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33447index 1c040d0..f9e4af8 100644
33448--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33449+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33450@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33451 {
33452 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33453 vma->vm_private_data;
33454- struct ttm_bo_device *bdev = bo->bdev;
33455+ struct ttm_bo_device *bdev;
33456 unsigned long bus_base;
33457 unsigned long bus_offset;
33458 unsigned long bus_size;
33459@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33460 unsigned long address = (unsigned long)vmf->virtual_address;
33461 int retval = VM_FAULT_NOPAGE;
33462
33463+ if (!bo)
33464+ return VM_FAULT_NOPAGE;
33465+ bdev = bo->bdev;
33466+
33467 /*
33468 * Work around locking order reversal in fault / nopfn
33469 * between mmap_sem and bo_reserve: Perform a trylock operation
33470diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33471index b170071..28ae90e 100644
33472--- a/drivers/gpu/drm/ttm/ttm_global.c
33473+++ b/drivers/gpu/drm/ttm/ttm_global.c
33474@@ -36,7 +36,7 @@
33475 struct ttm_global_item {
33476 struct mutex mutex;
33477 void *object;
33478- int refcount;
33479+ atomic_t refcount;
33480 };
33481
33482 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33483@@ -49,7 +49,7 @@ void ttm_global_init(void)
33484 struct ttm_global_item *item = &glob[i];
33485 mutex_init(&item->mutex);
33486 item->object = NULL;
33487- item->refcount = 0;
33488+ atomic_set(&item->refcount, 0);
33489 }
33490 }
33491
33492@@ -59,7 +59,7 @@ void ttm_global_release(void)
33493 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33494 struct ttm_global_item *item = &glob[i];
33495 BUG_ON(item->object != NULL);
33496- BUG_ON(item->refcount != 0);
33497+ BUG_ON(atomic_read(&item->refcount) != 0);
33498 }
33499 }
33500
33501@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33502 void *object;
33503
33504 mutex_lock(&item->mutex);
33505- if (item->refcount == 0) {
33506+ if (atomic_read(&item->refcount) == 0) {
33507 item->object = kzalloc(ref->size, GFP_KERNEL);
33508 if (unlikely(item->object == NULL)) {
33509 ret = -ENOMEM;
33510@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33511 goto out_err;
33512
33513 }
33514- ++item->refcount;
33515+ atomic_inc(&item->refcount);
33516 ref->object = item->object;
33517 object = item->object;
33518 mutex_unlock(&item->mutex);
33519@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33520 struct ttm_global_item *item = &glob[ref->global_type];
33521
33522 mutex_lock(&item->mutex);
33523- BUG_ON(item->refcount == 0);
33524+ BUG_ON(atomic_read(&item->refcount) == 0);
33525 BUG_ON(ref->object != item->object);
33526- if (--item->refcount == 0) {
33527+ if (atomic_dec_and_test(&item->refcount)) {
33528 ref->release(ref);
33529 item->object = NULL;
33530 }
33531diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33532index 072c281..d8ef483 100644
33533--- a/drivers/gpu/drm/ttm/ttm_memory.c
33534+++ b/drivers/gpu/drm/ttm/ttm_memory.c
33535@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33536 NULL
33537 };
33538
33539-static struct sysfs_ops ttm_mem_zone_ops = {
33540+static const struct sysfs_ops ttm_mem_zone_ops = {
33541 .show = &ttm_mem_zone_show,
33542 .store = &ttm_mem_zone_store
33543 };
33544diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33545index cafcb84..b8e66cc 100644
33546--- a/drivers/gpu/drm/via/via_drv.h
33547+++ b/drivers/gpu/drm/via/via_drv.h
33548@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33549 typedef uint32_t maskarray_t[5];
33550
33551 typedef struct drm_via_irq {
33552- atomic_t irq_received;
33553+ atomic_unchecked_t irq_received;
33554 uint32_t pending_mask;
33555 uint32_t enable_mask;
33556 wait_queue_head_t irq_queue;
33557@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33558 struct timeval last_vblank;
33559 int last_vblank_valid;
33560 unsigned usec_per_vblank;
33561- atomic_t vbl_received;
33562+ atomic_unchecked_t vbl_received;
33563 drm_via_state_t hc_state;
33564 char pci_buf[VIA_PCI_BUF_SIZE];
33565 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33566diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33567index 5935b88..127a8a6 100644
33568--- a/drivers/gpu/drm/via/via_irq.c
33569+++ b/drivers/gpu/drm/via/via_irq.c
33570@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33571 if (crtc != 0)
33572 return 0;
33573
33574- return atomic_read(&dev_priv->vbl_received);
33575+ return atomic_read_unchecked(&dev_priv->vbl_received);
33576 }
33577
33578 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33579@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33580
33581 status = VIA_READ(VIA_REG_INTERRUPT);
33582 if (status & VIA_IRQ_VBLANK_PENDING) {
33583- atomic_inc(&dev_priv->vbl_received);
33584- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33585+ atomic_inc_unchecked(&dev_priv->vbl_received);
33586+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33587 do_gettimeofday(&cur_vblank);
33588 if (dev_priv->last_vblank_valid) {
33589 dev_priv->usec_per_vblank =
33590@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33591 dev_priv->last_vblank = cur_vblank;
33592 dev_priv->last_vblank_valid = 1;
33593 }
33594- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33595+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33596 DRM_DEBUG("US per vblank is: %u\n",
33597 dev_priv->usec_per_vblank);
33598 }
33599@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33600
33601 for (i = 0; i < dev_priv->num_irqs; ++i) {
33602 if (status & cur_irq->pending_mask) {
33603- atomic_inc(&cur_irq->irq_received);
33604+ atomic_inc_unchecked(&cur_irq->irq_received);
33605 DRM_WAKEUP(&cur_irq->irq_queue);
33606 handled = 1;
33607 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33608@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33609 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33610 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33611 masks[irq][4]));
33612- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33613+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33614 } else {
33615 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33616 (((cur_irq_sequence =
33617- atomic_read(&cur_irq->irq_received)) -
33618+ atomic_read_unchecked(&cur_irq->irq_received)) -
33619 *sequence) <= (1 << 23)));
33620 }
33621 *sequence = cur_irq_sequence;
33622@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33623 }
33624
33625 for (i = 0; i < dev_priv->num_irqs; ++i) {
33626- atomic_set(&cur_irq->irq_received, 0);
33627+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33628 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33629 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33630 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33631@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33632 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33633 case VIA_IRQ_RELATIVE:
33634 irqwait->request.sequence +=
33635- atomic_read(&cur_irq->irq_received);
33636+ atomic_read_unchecked(&cur_irq->irq_received);
33637 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33638 case VIA_IRQ_ABSOLUTE:
33639 break;
33640diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33641index aa8688d..6a0140c 100644
33642--- a/drivers/gpu/vga/vgaarb.c
33643+++ b/drivers/gpu/vga/vgaarb.c
33644@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33645 uc = &priv->cards[i];
33646 }
33647
33648- if (!uc)
33649- return -EINVAL;
33650+ if (!uc) {
33651+ ret_val = -EINVAL;
33652+ goto done;
33653+ }
33654
33655- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33656- return -EINVAL;
33657+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33658+ ret_val = -EINVAL;
33659+ goto done;
33660+ }
33661
33662- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33663- return -EINVAL;
33664+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33665+ ret_val = -EINVAL;
33666+ goto done;
33667+ }
33668
33669 vga_put(pdev, io_state);
33670
33671diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33672index f3f1415..e561d90 100644
33673--- a/drivers/hid/hid-core.c
33674+++ b/drivers/hid/hid-core.c
33675@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33676
33677 int hid_add_device(struct hid_device *hdev)
33678 {
33679- static atomic_t id = ATOMIC_INIT(0);
33680+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33681 int ret;
33682
33683 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33684@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
33685 /* XXX hack, any other cleaner solution after the driver core
33686 * is converted to allow more than 20 bytes as the device name? */
33687 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33688- hdev->vendor, hdev->product, atomic_inc_return(&id));
33689+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33690
33691 ret = device_add(&hdev->dev);
33692 if (!ret)
33693diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33694index 8b6ee24..70f657d 100644
33695--- a/drivers/hid/usbhid/hiddev.c
33696+++ b/drivers/hid/usbhid/hiddev.c
33697@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33698 return put_user(HID_VERSION, (int __user *)arg);
33699
33700 case HIDIOCAPPLICATION:
33701- if (arg < 0 || arg >= hid->maxapplication)
33702+ if (arg >= hid->maxapplication)
33703 return -EINVAL;
33704
33705 for (i = 0; i < hid->maxcollection; i++)
33706diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
33707index 5d5ed69..f40533e 100644
33708--- a/drivers/hwmon/lis3lv02d.c
33709+++ b/drivers/hwmon/lis3lv02d.c
33710@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
33711 * the lid is closed. This leads to interrupts as soon as a little move
33712 * is done.
33713 */
33714- atomic_inc(&lis3_dev.count);
33715+ atomic_inc_unchecked(&lis3_dev.count);
33716
33717 wake_up_interruptible(&lis3_dev.misc_wait);
33718 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
33719@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33720 if (test_and_set_bit(0, &lis3_dev.misc_opened))
33721 return -EBUSY; /* already open */
33722
33723- atomic_set(&lis3_dev.count, 0);
33724+ atomic_set_unchecked(&lis3_dev.count, 0);
33725
33726 /*
33727 * The sensor can generate interrupts for free-fall and direction
33728@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33729 add_wait_queue(&lis3_dev.misc_wait, &wait);
33730 while (true) {
33731 set_current_state(TASK_INTERRUPTIBLE);
33732- data = atomic_xchg(&lis3_dev.count, 0);
33733+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
33734 if (data)
33735 break;
33736
33737@@ -244,7 +244,7 @@ out:
33738 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33739 {
33740 poll_wait(file, &lis3_dev.misc_wait, wait);
33741- if (atomic_read(&lis3_dev.count))
33742+ if (atomic_read_unchecked(&lis3_dev.count))
33743 return POLLIN | POLLRDNORM;
33744 return 0;
33745 }
33746diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
33747index 7cdd76f..fe0efdf 100644
33748--- a/drivers/hwmon/lis3lv02d.h
33749+++ b/drivers/hwmon/lis3lv02d.h
33750@@ -201,7 +201,7 @@ struct lis3lv02d {
33751
33752 struct input_polled_dev *idev; /* input device */
33753 struct platform_device *pdev; /* platform device */
33754- atomic_t count; /* interrupt count after last read */
33755+ atomic_unchecked_t count; /* interrupt count after last read */
33756 int xcalib; /* calibrated null value for x */
33757 int ycalib; /* calibrated null value for y */
33758 int zcalib; /* calibrated null value for z */
33759diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33760index 2040507..706ec1e 100644
33761--- a/drivers/hwmon/sht15.c
33762+++ b/drivers/hwmon/sht15.c
33763@@ -112,7 +112,7 @@ struct sht15_data {
33764 int supply_uV;
33765 int supply_uV_valid;
33766 struct work_struct update_supply_work;
33767- atomic_t interrupt_handled;
33768+ atomic_unchecked_t interrupt_handled;
33769 };
33770
33771 /**
33772@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
33773 return ret;
33774
33775 gpio_direction_input(data->pdata->gpio_data);
33776- atomic_set(&data->interrupt_handled, 0);
33777+ atomic_set_unchecked(&data->interrupt_handled, 0);
33778
33779 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33780 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33781 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33782 /* Only relevant if the interrupt hasn't occured. */
33783- if (!atomic_read(&data->interrupt_handled))
33784+ if (!atomic_read_unchecked(&data->interrupt_handled))
33785 schedule_work(&data->read_work);
33786 }
33787 ret = wait_event_timeout(data->wait_queue,
33788@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33789 struct sht15_data *data = d;
33790 /* First disable the interrupt */
33791 disable_irq_nosync(irq);
33792- atomic_inc(&data->interrupt_handled);
33793+ atomic_inc_unchecked(&data->interrupt_handled);
33794 /* Then schedule a reading work struct */
33795 if (data->flag != SHT15_READING_NOTHING)
33796 schedule_work(&data->read_work);
33797@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33798 here as could have gone low in meantime so verify
33799 it hasn't!
33800 */
33801- atomic_set(&data->interrupt_handled, 0);
33802+ atomic_set_unchecked(&data->interrupt_handled, 0);
33803 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33804 /* If still not occured or another handler has been scheduled */
33805 if (gpio_get_value(data->pdata->gpio_data)
33806- || atomic_read(&data->interrupt_handled))
33807+ || atomic_read_unchecked(&data->interrupt_handled))
33808 return;
33809 }
33810 /* Read the data back from the device */
33811diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
33812index 97851c5..cb40626 100644
33813--- a/drivers/hwmon/w83791d.c
33814+++ b/drivers/hwmon/w83791d.c
33815@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
33816 struct i2c_board_info *info);
33817 static int w83791d_remove(struct i2c_client *client);
33818
33819-static int w83791d_read(struct i2c_client *client, u8 register);
33820-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
33821+static int w83791d_read(struct i2c_client *client, u8 reg);
33822+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
33823 static struct w83791d_data *w83791d_update_device(struct device *dev);
33824
33825 #ifdef DEBUG
33826diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33827index 378fcb5..5e91fa8 100644
33828--- a/drivers/i2c/busses/i2c-amd756-s4882.c
33829+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33830@@ -43,7 +43,7 @@
33831 extern struct i2c_adapter amd756_smbus;
33832
33833 static struct i2c_adapter *s4882_adapter;
33834-static struct i2c_algorithm *s4882_algo;
33835+static i2c_algorithm_no_const *s4882_algo;
33836
33837 /* Wrapper access functions for multiplexed SMBus */
33838 static DEFINE_MUTEX(amd756_lock);
33839diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33840index 29015eb..af2d8e9 100644
33841--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33842+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33843@@ -41,7 +41,7 @@
33844 extern struct i2c_adapter *nforce2_smbus;
33845
33846 static struct i2c_adapter *s4985_adapter;
33847-static struct i2c_algorithm *s4985_algo;
33848+static i2c_algorithm_no_const *s4985_algo;
33849
33850 /* Wrapper access functions for multiplexed SMBus */
33851 static DEFINE_MUTEX(nforce2_lock);
33852diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
33853index 878f8ec..12376fc 100644
33854--- a/drivers/ide/aec62xx.c
33855+++ b/drivers/ide/aec62xx.c
33856@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
33857 .cable_detect = atp86x_cable_detect,
33858 };
33859
33860-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
33861+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
33862 { /* 0: AEC6210 */
33863 .name = DRV_NAME,
33864 .init_chipset = init_chipset_aec62xx,
33865diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
33866index e59b6de..4b4fc65 100644
33867--- a/drivers/ide/alim15x3.c
33868+++ b/drivers/ide/alim15x3.c
33869@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
33870 .dma_sff_read_status = ide_dma_sff_read_status,
33871 };
33872
33873-static const struct ide_port_info ali15x3_chipset __devinitdata = {
33874+static const struct ide_port_info ali15x3_chipset __devinitconst = {
33875 .name = DRV_NAME,
33876 .init_chipset = init_chipset_ali15x3,
33877 .init_hwif = init_hwif_ali15x3,
33878diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
33879index 628cd2e..087a414 100644
33880--- a/drivers/ide/amd74xx.c
33881+++ b/drivers/ide/amd74xx.c
33882@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
33883 .udma_mask = udma, \
33884 }
33885
33886-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
33887+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
33888 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
33889 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
33890 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
33891diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
33892index 837322b..837fd71 100644
33893--- a/drivers/ide/atiixp.c
33894+++ b/drivers/ide/atiixp.c
33895@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
33896 .cable_detect = atiixp_cable_detect,
33897 };
33898
33899-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
33900+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
33901 { /* 0: IXP200/300/400/700 */
33902 .name = DRV_NAME,
33903 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
33904diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
33905index ca0c46f..d55318a 100644
33906--- a/drivers/ide/cmd64x.c
33907+++ b/drivers/ide/cmd64x.c
33908@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
33909 .dma_sff_read_status = ide_dma_sff_read_status,
33910 };
33911
33912-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
33913+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
33914 { /* 0: CMD643 */
33915 .name = DRV_NAME,
33916 .init_chipset = init_chipset_cmd64x,
33917diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
33918index 09f98ed..cebc5bc 100644
33919--- a/drivers/ide/cs5520.c
33920+++ b/drivers/ide/cs5520.c
33921@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
33922 .set_dma_mode = cs5520_set_dma_mode,
33923 };
33924
33925-static const struct ide_port_info cyrix_chipset __devinitdata = {
33926+static const struct ide_port_info cyrix_chipset __devinitconst = {
33927 .name = DRV_NAME,
33928 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
33929 .port_ops = &cs5520_port_ops,
33930diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
33931index 40bf05e..7d58ca0 100644
33932--- a/drivers/ide/cs5530.c
33933+++ b/drivers/ide/cs5530.c
33934@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
33935 .udma_filter = cs5530_udma_filter,
33936 };
33937
33938-static const struct ide_port_info cs5530_chipset __devinitdata = {
33939+static const struct ide_port_info cs5530_chipset __devinitconst = {
33940 .name = DRV_NAME,
33941 .init_chipset = init_chipset_cs5530,
33942 .init_hwif = init_hwif_cs5530,
33943diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
33944index 983d957..53e6172 100644
33945--- a/drivers/ide/cs5535.c
33946+++ b/drivers/ide/cs5535.c
33947@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
33948 .cable_detect = cs5535_cable_detect,
33949 };
33950
33951-static const struct ide_port_info cs5535_chipset __devinitdata = {
33952+static const struct ide_port_info cs5535_chipset __devinitconst = {
33953 .name = DRV_NAME,
33954 .port_ops = &cs5535_port_ops,
33955 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
33956diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
33957index 74fc540..8e933d8 100644
33958--- a/drivers/ide/cy82c693.c
33959+++ b/drivers/ide/cy82c693.c
33960@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
33961 .set_dma_mode = cy82c693_set_dma_mode,
33962 };
33963
33964-static const struct ide_port_info cy82c693_chipset __devinitdata = {
33965+static const struct ide_port_info cy82c693_chipset __devinitconst = {
33966 .name = DRV_NAME,
33967 .init_iops = init_iops_cy82c693,
33968 .port_ops = &cy82c693_port_ops,
33969diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
33970index 7ce68ef..e78197d 100644
33971--- a/drivers/ide/hpt366.c
33972+++ b/drivers/ide/hpt366.c
33973@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
33974 }
33975 };
33976
33977-static const struct hpt_info hpt36x __devinitdata = {
33978+static const struct hpt_info hpt36x __devinitconst = {
33979 .chip_name = "HPT36x",
33980 .chip_type = HPT36x,
33981 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
33982@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
33983 .timings = &hpt36x_timings
33984 };
33985
33986-static const struct hpt_info hpt370 __devinitdata = {
33987+static const struct hpt_info hpt370 __devinitconst = {
33988 .chip_name = "HPT370",
33989 .chip_type = HPT370,
33990 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
33991@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
33992 .timings = &hpt37x_timings
33993 };
33994
33995-static const struct hpt_info hpt370a __devinitdata = {
33996+static const struct hpt_info hpt370a __devinitconst = {
33997 .chip_name = "HPT370A",
33998 .chip_type = HPT370A,
33999 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34000@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34001 .timings = &hpt37x_timings
34002 };
34003
34004-static const struct hpt_info hpt374 __devinitdata = {
34005+static const struct hpt_info hpt374 __devinitconst = {
34006 .chip_name = "HPT374",
34007 .chip_type = HPT374,
34008 .udma_mask = ATA_UDMA5,
34009@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34010 .timings = &hpt37x_timings
34011 };
34012
34013-static const struct hpt_info hpt372 __devinitdata = {
34014+static const struct hpt_info hpt372 __devinitconst = {
34015 .chip_name = "HPT372",
34016 .chip_type = HPT372,
34017 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34018@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34019 .timings = &hpt37x_timings
34020 };
34021
34022-static const struct hpt_info hpt372a __devinitdata = {
34023+static const struct hpt_info hpt372a __devinitconst = {
34024 .chip_name = "HPT372A",
34025 .chip_type = HPT372A,
34026 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34027@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34028 .timings = &hpt37x_timings
34029 };
34030
34031-static const struct hpt_info hpt302 __devinitdata = {
34032+static const struct hpt_info hpt302 __devinitconst = {
34033 .chip_name = "HPT302",
34034 .chip_type = HPT302,
34035 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34036@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34037 .timings = &hpt37x_timings
34038 };
34039
34040-static const struct hpt_info hpt371 __devinitdata = {
34041+static const struct hpt_info hpt371 __devinitconst = {
34042 .chip_name = "HPT371",
34043 .chip_type = HPT371,
34044 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34045@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34046 .timings = &hpt37x_timings
34047 };
34048
34049-static const struct hpt_info hpt372n __devinitdata = {
34050+static const struct hpt_info hpt372n __devinitconst = {
34051 .chip_name = "HPT372N",
34052 .chip_type = HPT372N,
34053 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34054@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34055 .timings = &hpt37x_timings
34056 };
34057
34058-static const struct hpt_info hpt302n __devinitdata = {
34059+static const struct hpt_info hpt302n __devinitconst = {
34060 .chip_name = "HPT302N",
34061 .chip_type = HPT302N,
34062 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34063@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34064 .timings = &hpt37x_timings
34065 };
34066
34067-static const struct hpt_info hpt371n __devinitdata = {
34068+static const struct hpt_info hpt371n __devinitconst = {
34069 .chip_name = "HPT371N",
34070 .chip_type = HPT371N,
34071 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34072@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34073 .dma_sff_read_status = ide_dma_sff_read_status,
34074 };
34075
34076-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34077+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34078 { /* 0: HPT36x */
34079 .name = DRV_NAME,
34080 .init_chipset = init_chipset_hpt366,
34081diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34082index 2de76cc..74186a1 100644
34083--- a/drivers/ide/ide-cd.c
34084+++ b/drivers/ide/ide-cd.c
34085@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34086 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34087 if ((unsigned long)buf & alignment
34088 || blk_rq_bytes(rq) & q->dma_pad_mask
34089- || object_is_on_stack(buf))
34090+ || object_starts_on_stack(buf))
34091 drive->dma = 0;
34092 }
34093 }
34094diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34095index fefbdfc..62ff465 100644
34096--- a/drivers/ide/ide-floppy.c
34097+++ b/drivers/ide/ide-floppy.c
34098@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34099 u8 pc_buf[256], header_len, desc_cnt;
34100 int i, rc = 1, blocks, length;
34101
34102+ pax_track_stack();
34103+
34104 ide_debug_log(IDE_DBG_FUNC, "enter");
34105
34106 drive->bios_cyl = 0;
34107diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34108index 39d4e01..11538ce 100644
34109--- a/drivers/ide/ide-pci-generic.c
34110+++ b/drivers/ide/ide-pci-generic.c
34111@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34112 .udma_mask = ATA_UDMA6, \
34113 }
34114
34115-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34116+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34117 /* 0: Unknown */
34118 DECLARE_GENERIC_PCI_DEV(0),
34119
34120diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34121index 0d266a5..aaca790 100644
34122--- a/drivers/ide/it8172.c
34123+++ b/drivers/ide/it8172.c
34124@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34125 .set_dma_mode = it8172_set_dma_mode,
34126 };
34127
34128-static const struct ide_port_info it8172_port_info __devinitdata = {
34129+static const struct ide_port_info it8172_port_info __devinitconst = {
34130 .name = DRV_NAME,
34131 .port_ops = &it8172_port_ops,
34132 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34133diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34134index 4797616..4be488a 100644
34135--- a/drivers/ide/it8213.c
34136+++ b/drivers/ide/it8213.c
34137@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34138 .cable_detect = it8213_cable_detect,
34139 };
34140
34141-static const struct ide_port_info it8213_chipset __devinitdata = {
34142+static const struct ide_port_info it8213_chipset __devinitconst = {
34143 .name = DRV_NAME,
34144 .enablebits = { {0x41, 0x80, 0x80} },
34145 .port_ops = &it8213_port_ops,
34146diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34147index 51aa745..146ee60 100644
34148--- a/drivers/ide/it821x.c
34149+++ b/drivers/ide/it821x.c
34150@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34151 .cable_detect = it821x_cable_detect,
34152 };
34153
34154-static const struct ide_port_info it821x_chipset __devinitdata = {
34155+static const struct ide_port_info it821x_chipset __devinitconst = {
34156 .name = DRV_NAME,
34157 .init_chipset = init_chipset_it821x,
34158 .init_hwif = init_hwif_it821x,
34159diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34160index bf2be64..9270098 100644
34161--- a/drivers/ide/jmicron.c
34162+++ b/drivers/ide/jmicron.c
34163@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34164 .cable_detect = jmicron_cable_detect,
34165 };
34166
34167-static const struct ide_port_info jmicron_chipset __devinitdata = {
34168+static const struct ide_port_info jmicron_chipset __devinitconst = {
34169 .name = DRV_NAME,
34170 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34171 .port_ops = &jmicron_port_ops,
34172diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34173index 95327a2..73f78d8 100644
34174--- a/drivers/ide/ns87415.c
34175+++ b/drivers/ide/ns87415.c
34176@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34177 .dma_sff_read_status = superio_dma_sff_read_status,
34178 };
34179
34180-static const struct ide_port_info ns87415_chipset __devinitdata = {
34181+static const struct ide_port_info ns87415_chipset __devinitconst = {
34182 .name = DRV_NAME,
34183 .init_hwif = init_hwif_ns87415,
34184 .tp_ops = &ns87415_tp_ops,
34185diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34186index f1d70d6..e1de05b 100644
34187--- a/drivers/ide/opti621.c
34188+++ b/drivers/ide/opti621.c
34189@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34190 .set_pio_mode = opti621_set_pio_mode,
34191 };
34192
34193-static const struct ide_port_info opti621_chipset __devinitdata = {
34194+static const struct ide_port_info opti621_chipset __devinitconst = {
34195 .name = DRV_NAME,
34196 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34197 .port_ops = &opti621_port_ops,
34198diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34199index 65ba823..7311f4d 100644
34200--- a/drivers/ide/pdc202xx_new.c
34201+++ b/drivers/ide/pdc202xx_new.c
34202@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34203 .udma_mask = udma, \
34204 }
34205
34206-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34207+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34208 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34209 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34210 };
34211diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34212index cb812f3..af816ef 100644
34213--- a/drivers/ide/pdc202xx_old.c
34214+++ b/drivers/ide/pdc202xx_old.c
34215@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34216 .max_sectors = sectors, \
34217 }
34218
34219-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34220+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34221 { /* 0: PDC20246 */
34222 .name = DRV_NAME,
34223 .init_chipset = init_chipset_pdc202xx,
34224diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34225index bf14f39..15c4b98 100644
34226--- a/drivers/ide/piix.c
34227+++ b/drivers/ide/piix.c
34228@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34229 .udma_mask = udma, \
34230 }
34231
34232-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34233+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34234 /* 0: MPIIX */
34235 { /*
34236 * MPIIX actually has only a single IDE channel mapped to
34237diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34238index a6414a8..c04173e 100644
34239--- a/drivers/ide/rz1000.c
34240+++ b/drivers/ide/rz1000.c
34241@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34242 }
34243 }
34244
34245-static const struct ide_port_info rz1000_chipset __devinitdata = {
34246+static const struct ide_port_info rz1000_chipset __devinitconst = {
34247 .name = DRV_NAME,
34248 .host_flags = IDE_HFLAG_NO_DMA,
34249 };
34250diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34251index d467478..9203942 100644
34252--- a/drivers/ide/sc1200.c
34253+++ b/drivers/ide/sc1200.c
34254@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34255 .dma_sff_read_status = ide_dma_sff_read_status,
34256 };
34257
34258-static const struct ide_port_info sc1200_chipset __devinitdata = {
34259+static const struct ide_port_info sc1200_chipset __devinitconst = {
34260 .name = DRV_NAME,
34261 .port_ops = &sc1200_port_ops,
34262 .dma_ops = &sc1200_dma_ops,
34263diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34264index 1104bb3..59c5194 100644
34265--- a/drivers/ide/scc_pata.c
34266+++ b/drivers/ide/scc_pata.c
34267@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34268 .dma_sff_read_status = scc_dma_sff_read_status,
34269 };
34270
34271-static const struct ide_port_info scc_chipset __devinitdata = {
34272+static const struct ide_port_info scc_chipset __devinitconst = {
34273 .name = "sccIDE",
34274 .init_iops = init_iops_scc,
34275 .init_dma = scc_init_dma,
34276diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34277index b6554ef..6cc2cc3 100644
34278--- a/drivers/ide/serverworks.c
34279+++ b/drivers/ide/serverworks.c
34280@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34281 .cable_detect = svwks_cable_detect,
34282 };
34283
34284-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34285+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34286 { /* 0: OSB4 */
34287 .name = DRV_NAME,
34288 .init_chipset = init_chipset_svwks,
34289diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34290index ab3db61..afed580 100644
34291--- a/drivers/ide/setup-pci.c
34292+++ b/drivers/ide/setup-pci.c
34293@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34294 int ret, i, n_ports = dev2 ? 4 : 2;
34295 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34296
34297+ pax_track_stack();
34298+
34299 for (i = 0; i < n_ports / 2; i++) {
34300 ret = ide_setup_pci_controller(pdev[i], d, !i);
34301 if (ret < 0)
34302diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34303index d95df52..0b03a39 100644
34304--- a/drivers/ide/siimage.c
34305+++ b/drivers/ide/siimage.c
34306@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34307 .udma_mask = ATA_UDMA6, \
34308 }
34309
34310-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34311+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34312 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34313 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34314 };
34315diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34316index 3b88eba..ca8699d 100644
34317--- a/drivers/ide/sis5513.c
34318+++ b/drivers/ide/sis5513.c
34319@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34320 .cable_detect = sis_cable_detect,
34321 };
34322
34323-static const struct ide_port_info sis5513_chipset __devinitdata = {
34324+static const struct ide_port_info sis5513_chipset __devinitconst = {
34325 .name = DRV_NAME,
34326 .init_chipset = init_chipset_sis5513,
34327 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34328diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34329index d698da4..fca42a4 100644
34330--- a/drivers/ide/sl82c105.c
34331+++ b/drivers/ide/sl82c105.c
34332@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34333 .dma_sff_read_status = ide_dma_sff_read_status,
34334 };
34335
34336-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34337+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34338 .name = DRV_NAME,
34339 .init_chipset = init_chipset_sl82c105,
34340 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34341diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34342index 1ccfb40..83d5779 100644
34343--- a/drivers/ide/slc90e66.c
34344+++ b/drivers/ide/slc90e66.c
34345@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34346 .cable_detect = slc90e66_cable_detect,
34347 };
34348
34349-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34350+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34351 .name = DRV_NAME,
34352 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34353 .port_ops = &slc90e66_port_ops,
34354diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34355index 05a93d6..5f9e325 100644
34356--- a/drivers/ide/tc86c001.c
34357+++ b/drivers/ide/tc86c001.c
34358@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34359 .dma_sff_read_status = ide_dma_sff_read_status,
34360 };
34361
34362-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34363+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34364 .name = DRV_NAME,
34365 .init_hwif = init_hwif_tc86c001,
34366 .port_ops = &tc86c001_port_ops,
34367diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34368index 8773c3b..7907d6c 100644
34369--- a/drivers/ide/triflex.c
34370+++ b/drivers/ide/triflex.c
34371@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34372 .set_dma_mode = triflex_set_mode,
34373 };
34374
34375-static const struct ide_port_info triflex_device __devinitdata = {
34376+static const struct ide_port_info triflex_device __devinitconst = {
34377 .name = DRV_NAME,
34378 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34379 .port_ops = &triflex_port_ops,
34380diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34381index 4b42ca0..e494a98 100644
34382--- a/drivers/ide/trm290.c
34383+++ b/drivers/ide/trm290.c
34384@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34385 .dma_check = trm290_dma_check,
34386 };
34387
34388-static const struct ide_port_info trm290_chipset __devinitdata = {
34389+static const struct ide_port_info trm290_chipset __devinitconst = {
34390 .name = DRV_NAME,
34391 .init_hwif = init_hwif_trm290,
34392 .tp_ops = &trm290_tp_ops,
34393diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34394index 028de26..520d5d5 100644
34395--- a/drivers/ide/via82cxxx.c
34396+++ b/drivers/ide/via82cxxx.c
34397@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34398 .cable_detect = via82cxxx_cable_detect,
34399 };
34400
34401-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34402+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34403 .name = DRV_NAME,
34404 .init_chipset = init_chipset_via82cxxx,
34405 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34406diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34407index 2cd00b5..14de699 100644
34408--- a/drivers/ieee1394/dv1394.c
34409+++ b/drivers/ieee1394/dv1394.c
34410@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34411 based upon DIF section and sequence
34412 */
34413
34414-static void inline
34415+static inline void
34416 frame_put_packet (struct frame *f, struct packet *p)
34417 {
34418 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34419diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34420index e947d8f..6a966b9 100644
34421--- a/drivers/ieee1394/hosts.c
34422+++ b/drivers/ieee1394/hosts.c
34423@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34424 }
34425
34426 static struct hpsb_host_driver dummy_driver = {
34427+ .name = "dummy",
34428 .transmit_packet = dummy_transmit_packet,
34429 .devctl = dummy_devctl,
34430 .isoctl = dummy_isoctl
34431diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34432index ddaab6e..8d37435 100644
34433--- a/drivers/ieee1394/init_ohci1394_dma.c
34434+++ b/drivers/ieee1394/init_ohci1394_dma.c
34435@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34436 for (func = 0; func < 8; func++) {
34437 u32 class = read_pci_config(num,slot,func,
34438 PCI_CLASS_REVISION);
34439- if ((class == 0xffffffff))
34440+ if (class == 0xffffffff)
34441 continue; /* No device at this func */
34442
34443 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34444diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34445index 65c1429..5d8c11f 100644
34446--- a/drivers/ieee1394/ohci1394.c
34447+++ b/drivers/ieee1394/ohci1394.c
34448@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34449 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34450
34451 /* Module Parameters */
34452-static int phys_dma = 1;
34453+static int phys_dma;
34454 module_param(phys_dma, int, 0444);
34455-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34456+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34457
34458 static void dma_trm_tasklet(unsigned long data);
34459 static void dma_trm_reset(struct dma_trm_ctx *d);
34460diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34461index f199896..78c9fc8 100644
34462--- a/drivers/ieee1394/sbp2.c
34463+++ b/drivers/ieee1394/sbp2.c
34464@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34465 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34466 MODULE_LICENSE("GPL");
34467
34468-static int sbp2_module_init(void)
34469+static int __init sbp2_module_init(void)
34470 {
34471 int ret;
34472
34473diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34474index a5dea6b..0cefe8f 100644
34475--- a/drivers/infiniband/core/cm.c
34476+++ b/drivers/infiniband/core/cm.c
34477@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34478
34479 struct cm_counter_group {
34480 struct kobject obj;
34481- atomic_long_t counter[CM_ATTR_COUNT];
34482+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34483 };
34484
34485 struct cm_counter_attribute {
34486@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34487 struct ib_mad_send_buf *msg = NULL;
34488 int ret;
34489
34490- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34491+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34492 counter[CM_REQ_COUNTER]);
34493
34494 /* Quick state check to discard duplicate REQs. */
34495@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34496 if (!cm_id_priv)
34497 return;
34498
34499- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34500+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34501 counter[CM_REP_COUNTER]);
34502 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34503 if (ret)
34504@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34505 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34506 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34507 spin_unlock_irq(&cm_id_priv->lock);
34508- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34509+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34510 counter[CM_RTU_COUNTER]);
34511 goto out;
34512 }
34513@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34514 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34515 dreq_msg->local_comm_id);
34516 if (!cm_id_priv) {
34517- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34518+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34519 counter[CM_DREQ_COUNTER]);
34520 cm_issue_drep(work->port, work->mad_recv_wc);
34521 return -EINVAL;
34522@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34523 case IB_CM_MRA_REP_RCVD:
34524 break;
34525 case IB_CM_TIMEWAIT:
34526- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34527+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34528 counter[CM_DREQ_COUNTER]);
34529 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34530 goto unlock;
34531@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34532 cm_free_msg(msg);
34533 goto deref;
34534 case IB_CM_DREQ_RCVD:
34535- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34536+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34537 counter[CM_DREQ_COUNTER]);
34538 goto unlock;
34539 default:
34540@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34541 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34542 cm_id_priv->msg, timeout)) {
34543 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34544- atomic_long_inc(&work->port->
34545+ atomic_long_inc_unchecked(&work->port->
34546 counter_group[CM_RECV_DUPLICATES].
34547 counter[CM_MRA_COUNTER]);
34548 goto out;
34549@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34550 break;
34551 case IB_CM_MRA_REQ_RCVD:
34552 case IB_CM_MRA_REP_RCVD:
34553- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34554+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34555 counter[CM_MRA_COUNTER]);
34556 /* fall through */
34557 default:
34558@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34559 case IB_CM_LAP_IDLE:
34560 break;
34561 case IB_CM_MRA_LAP_SENT:
34562- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34563+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34564 counter[CM_LAP_COUNTER]);
34565 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34566 goto unlock;
34567@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34568 cm_free_msg(msg);
34569 goto deref;
34570 case IB_CM_LAP_RCVD:
34571- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34572+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34573 counter[CM_LAP_COUNTER]);
34574 goto unlock;
34575 default:
34576@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34577 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34578 if (cur_cm_id_priv) {
34579 spin_unlock_irq(&cm.lock);
34580- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34581+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34582 counter[CM_SIDR_REQ_COUNTER]);
34583 goto out; /* Duplicate message. */
34584 }
34585@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34586 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34587 msg->retries = 1;
34588
34589- atomic_long_add(1 + msg->retries,
34590+ atomic_long_add_unchecked(1 + msg->retries,
34591 &port->counter_group[CM_XMIT].counter[attr_index]);
34592 if (msg->retries)
34593- atomic_long_add(msg->retries,
34594+ atomic_long_add_unchecked(msg->retries,
34595 &port->counter_group[CM_XMIT_RETRIES].
34596 counter[attr_index]);
34597
34598@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34599 }
34600
34601 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34602- atomic_long_inc(&port->counter_group[CM_RECV].
34603+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34604 counter[attr_id - CM_ATTR_ID_OFFSET]);
34605
34606 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34607@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34608 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34609
34610 return sprintf(buf, "%ld\n",
34611- atomic_long_read(&group->counter[cm_attr->index]));
34612+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34613 }
34614
34615-static struct sysfs_ops cm_counter_ops = {
34616+static const struct sysfs_ops cm_counter_ops = {
34617 .show = cm_show_counter
34618 };
34619
34620diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34621index 4507043..14ad522 100644
34622--- a/drivers/infiniband/core/fmr_pool.c
34623+++ b/drivers/infiniband/core/fmr_pool.c
34624@@ -97,8 +97,8 @@ struct ib_fmr_pool {
34625
34626 struct task_struct *thread;
34627
34628- atomic_t req_ser;
34629- atomic_t flush_ser;
34630+ atomic_unchecked_t req_ser;
34631+ atomic_unchecked_t flush_ser;
34632
34633 wait_queue_head_t force_wait;
34634 };
34635@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34636 struct ib_fmr_pool *pool = pool_ptr;
34637
34638 do {
34639- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34640+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34641 ib_fmr_batch_release(pool);
34642
34643- atomic_inc(&pool->flush_ser);
34644+ atomic_inc_unchecked(&pool->flush_ser);
34645 wake_up_interruptible(&pool->force_wait);
34646
34647 if (pool->flush_function)
34648@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34649 }
34650
34651 set_current_state(TASK_INTERRUPTIBLE);
34652- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34653+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34654 !kthread_should_stop())
34655 schedule();
34656 __set_current_state(TASK_RUNNING);
34657@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34658 pool->dirty_watermark = params->dirty_watermark;
34659 pool->dirty_len = 0;
34660 spin_lock_init(&pool->pool_lock);
34661- atomic_set(&pool->req_ser, 0);
34662- atomic_set(&pool->flush_ser, 0);
34663+ atomic_set_unchecked(&pool->req_ser, 0);
34664+ atomic_set_unchecked(&pool->flush_ser, 0);
34665 init_waitqueue_head(&pool->force_wait);
34666
34667 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34668@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34669 }
34670 spin_unlock_irq(&pool->pool_lock);
34671
34672- serial = atomic_inc_return(&pool->req_ser);
34673+ serial = atomic_inc_return_unchecked(&pool->req_ser);
34674 wake_up_process(pool->thread);
34675
34676 if (wait_event_interruptible(pool->force_wait,
34677- atomic_read(&pool->flush_ser) - serial >= 0))
34678+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34679 return -EINTR;
34680
34681 return 0;
34682@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34683 } else {
34684 list_add_tail(&fmr->list, &pool->dirty_list);
34685 if (++pool->dirty_len >= pool->dirty_watermark) {
34686- atomic_inc(&pool->req_ser);
34687+ atomic_inc_unchecked(&pool->req_ser);
34688 wake_up_process(pool->thread);
34689 }
34690 }
34691diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
34692index 158a214..1558bb7 100644
34693--- a/drivers/infiniband/core/sysfs.c
34694+++ b/drivers/infiniband/core/sysfs.c
34695@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
34696 return port_attr->show(p, port_attr, buf);
34697 }
34698
34699-static struct sysfs_ops port_sysfs_ops = {
34700+static const struct sysfs_ops port_sysfs_ops = {
34701 .show = port_attr_show
34702 };
34703
34704diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
34705index 5440da0..1194ecb 100644
34706--- a/drivers/infiniband/core/uverbs_marshall.c
34707+++ b/drivers/infiniband/core/uverbs_marshall.c
34708@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
34709 dst->grh.sgid_index = src->grh.sgid_index;
34710 dst->grh.hop_limit = src->grh.hop_limit;
34711 dst->grh.traffic_class = src->grh.traffic_class;
34712+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
34713 dst->dlid = src->dlid;
34714 dst->sl = src->sl;
34715 dst->src_path_bits = src->src_path_bits;
34716 dst->static_rate = src->static_rate;
34717 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
34718 dst->port_num = src->port_num;
34719+ dst->reserved = 0;
34720 }
34721 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
34722
34723 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34724 struct ib_qp_attr *src)
34725 {
34726+ dst->qp_state = src->qp_state;
34727 dst->cur_qp_state = src->cur_qp_state;
34728 dst->path_mtu = src->path_mtu;
34729 dst->path_mig_state = src->path_mig_state;
34730@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34731 dst->rnr_retry = src->rnr_retry;
34732 dst->alt_port_num = src->alt_port_num;
34733 dst->alt_timeout = src->alt_timeout;
34734+ memset(dst->reserved, 0, sizeof(dst->reserved));
34735 }
34736 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
34737
34738diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
34739index 100da85..62e6b88 100644
34740--- a/drivers/infiniband/hw/ipath/ipath_fs.c
34741+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
34742@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
34743 struct infinipath_counters counters;
34744 struct ipath_devdata *dd;
34745
34746+ pax_track_stack();
34747+
34748 dd = file->f_path.dentry->d_inode->i_private;
34749 dd->ipath_f_read_counters(dd, &counters);
34750
34751diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34752index cbde0cf..afaf55c 100644
34753--- a/drivers/infiniband/hw/nes/nes.c
34754+++ b/drivers/infiniband/hw/nes/nes.c
34755@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34756 LIST_HEAD(nes_adapter_list);
34757 static LIST_HEAD(nes_dev_list);
34758
34759-atomic_t qps_destroyed;
34760+atomic_unchecked_t qps_destroyed;
34761
34762 static unsigned int ee_flsh_adapter;
34763 static unsigned int sysfs_nonidx_addr;
34764@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34765 struct nes_adapter *nesadapter = nesdev->nesadapter;
34766 u32 qp_id;
34767
34768- atomic_inc(&qps_destroyed);
34769+ atomic_inc_unchecked(&qps_destroyed);
34770
34771 /* Free the control structures */
34772
34773diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34774index bcc6abc..9c76b2f 100644
34775--- a/drivers/infiniband/hw/nes/nes.h
34776+++ b/drivers/infiniband/hw/nes/nes.h
34777@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
34778 extern unsigned int wqm_quanta;
34779 extern struct list_head nes_adapter_list;
34780
34781-extern atomic_t cm_connects;
34782-extern atomic_t cm_accepts;
34783-extern atomic_t cm_disconnects;
34784-extern atomic_t cm_closes;
34785-extern atomic_t cm_connecteds;
34786-extern atomic_t cm_connect_reqs;
34787-extern atomic_t cm_rejects;
34788-extern atomic_t mod_qp_timouts;
34789-extern atomic_t qps_created;
34790-extern atomic_t qps_destroyed;
34791-extern atomic_t sw_qps_destroyed;
34792+extern atomic_unchecked_t cm_connects;
34793+extern atomic_unchecked_t cm_accepts;
34794+extern atomic_unchecked_t cm_disconnects;
34795+extern atomic_unchecked_t cm_closes;
34796+extern atomic_unchecked_t cm_connecteds;
34797+extern atomic_unchecked_t cm_connect_reqs;
34798+extern atomic_unchecked_t cm_rejects;
34799+extern atomic_unchecked_t mod_qp_timouts;
34800+extern atomic_unchecked_t qps_created;
34801+extern atomic_unchecked_t qps_destroyed;
34802+extern atomic_unchecked_t sw_qps_destroyed;
34803 extern u32 mh_detected;
34804 extern u32 mh_pauses_sent;
34805 extern u32 cm_packets_sent;
34806@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
34807 extern u32 cm_listens_created;
34808 extern u32 cm_listens_destroyed;
34809 extern u32 cm_backlog_drops;
34810-extern atomic_t cm_loopbacks;
34811-extern atomic_t cm_nodes_created;
34812-extern atomic_t cm_nodes_destroyed;
34813-extern atomic_t cm_accel_dropped_pkts;
34814-extern atomic_t cm_resets_recvd;
34815+extern atomic_unchecked_t cm_loopbacks;
34816+extern atomic_unchecked_t cm_nodes_created;
34817+extern atomic_unchecked_t cm_nodes_destroyed;
34818+extern atomic_unchecked_t cm_accel_dropped_pkts;
34819+extern atomic_unchecked_t cm_resets_recvd;
34820
34821 extern u32 int_mod_timer_init;
34822 extern u32 int_mod_cq_depth_256;
34823diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
34824index 73473db..5ed06e8 100644
34825--- a/drivers/infiniband/hw/nes/nes_cm.c
34826+++ b/drivers/infiniband/hw/nes/nes_cm.c
34827@@ -69,11 +69,11 @@ u32 cm_packets_received;
34828 u32 cm_listens_created;
34829 u32 cm_listens_destroyed;
34830 u32 cm_backlog_drops;
34831-atomic_t cm_loopbacks;
34832-atomic_t cm_nodes_created;
34833-atomic_t cm_nodes_destroyed;
34834-atomic_t cm_accel_dropped_pkts;
34835-atomic_t cm_resets_recvd;
34836+atomic_unchecked_t cm_loopbacks;
34837+atomic_unchecked_t cm_nodes_created;
34838+atomic_unchecked_t cm_nodes_destroyed;
34839+atomic_unchecked_t cm_accel_dropped_pkts;
34840+atomic_unchecked_t cm_resets_recvd;
34841
34842 static inline int mini_cm_accelerated(struct nes_cm_core *,
34843 struct nes_cm_node *);
34844@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
34845
34846 static struct nes_cm_core *g_cm_core;
34847
34848-atomic_t cm_connects;
34849-atomic_t cm_accepts;
34850-atomic_t cm_disconnects;
34851-atomic_t cm_closes;
34852-atomic_t cm_connecteds;
34853-atomic_t cm_connect_reqs;
34854-atomic_t cm_rejects;
34855+atomic_unchecked_t cm_connects;
34856+atomic_unchecked_t cm_accepts;
34857+atomic_unchecked_t cm_disconnects;
34858+atomic_unchecked_t cm_closes;
34859+atomic_unchecked_t cm_connecteds;
34860+atomic_unchecked_t cm_connect_reqs;
34861+atomic_unchecked_t cm_rejects;
34862
34863
34864 /**
34865@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
34866 cm_node->rem_mac);
34867
34868 add_hte_node(cm_core, cm_node);
34869- atomic_inc(&cm_nodes_created);
34870+ atomic_inc_unchecked(&cm_nodes_created);
34871
34872 return cm_node;
34873 }
34874@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
34875 }
34876
34877 atomic_dec(&cm_core->node_cnt);
34878- atomic_inc(&cm_nodes_destroyed);
34879+ atomic_inc_unchecked(&cm_nodes_destroyed);
34880 nesqp = cm_node->nesqp;
34881 if (nesqp) {
34882 nesqp->cm_node = NULL;
34883@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
34884
34885 static void drop_packet(struct sk_buff *skb)
34886 {
34887- atomic_inc(&cm_accel_dropped_pkts);
34888+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34889 dev_kfree_skb_any(skb);
34890 }
34891
34892@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
34893
34894 int reset = 0; /* whether to send reset in case of err.. */
34895 int passive_state;
34896- atomic_inc(&cm_resets_recvd);
34897+ atomic_inc_unchecked(&cm_resets_recvd);
34898 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
34899 " refcnt=%d\n", cm_node, cm_node->state,
34900 atomic_read(&cm_node->ref_count));
34901@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
34902 rem_ref_cm_node(cm_node->cm_core, cm_node);
34903 return NULL;
34904 }
34905- atomic_inc(&cm_loopbacks);
34906+ atomic_inc_unchecked(&cm_loopbacks);
34907 loopbackremotenode->loopbackpartner = cm_node;
34908 loopbackremotenode->tcp_cntxt.rcv_wscale =
34909 NES_CM_DEFAULT_RCV_WND_SCALE;
34910@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
34911 add_ref_cm_node(cm_node);
34912 } else if (cm_node->state == NES_CM_STATE_TSA) {
34913 rem_ref_cm_node(cm_core, cm_node);
34914- atomic_inc(&cm_accel_dropped_pkts);
34915+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34916 dev_kfree_skb_any(skb);
34917 break;
34918 }
34919@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34920
34921 if ((cm_id) && (cm_id->event_handler)) {
34922 if (issue_disconn) {
34923- atomic_inc(&cm_disconnects);
34924+ atomic_inc_unchecked(&cm_disconnects);
34925 cm_event.event = IW_CM_EVENT_DISCONNECT;
34926 cm_event.status = disconn_status;
34927 cm_event.local_addr = cm_id->local_addr;
34928@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34929 }
34930
34931 if (issue_close) {
34932- atomic_inc(&cm_closes);
34933+ atomic_inc_unchecked(&cm_closes);
34934 nes_disconnect(nesqp, 1);
34935
34936 cm_id->provider_data = nesqp;
34937@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34938
34939 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
34940 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
34941- atomic_inc(&cm_accepts);
34942+ atomic_inc_unchecked(&cm_accepts);
34943
34944 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
34945 atomic_read(&nesvnic->netdev->refcnt));
34946@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
34947
34948 struct nes_cm_core *cm_core;
34949
34950- atomic_inc(&cm_rejects);
34951+ atomic_inc_unchecked(&cm_rejects);
34952 cm_node = (struct nes_cm_node *) cm_id->provider_data;
34953 loopback = cm_node->loopbackpartner;
34954 cm_core = cm_node->cm_core;
34955@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34956 ntohl(cm_id->local_addr.sin_addr.s_addr),
34957 ntohs(cm_id->local_addr.sin_port));
34958
34959- atomic_inc(&cm_connects);
34960+ atomic_inc_unchecked(&cm_connects);
34961 nesqp->active_conn = 1;
34962
34963 /* cache the cm_id in the qp */
34964@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
34965 if (nesqp->destroyed) {
34966 return;
34967 }
34968- atomic_inc(&cm_connecteds);
34969+ atomic_inc_unchecked(&cm_connecteds);
34970 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
34971 " local port 0x%04X. jiffies = %lu.\n",
34972 nesqp->hwqp.qp_id,
34973@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
34974
34975 ret = cm_id->event_handler(cm_id, &cm_event);
34976 cm_id->add_ref(cm_id);
34977- atomic_inc(&cm_closes);
34978+ atomic_inc_unchecked(&cm_closes);
34979 cm_event.event = IW_CM_EVENT_CLOSE;
34980 cm_event.status = IW_CM_EVENT_STATUS_OK;
34981 cm_event.provider_data = cm_id->provider_data;
34982@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
34983 return;
34984 cm_id = cm_node->cm_id;
34985
34986- atomic_inc(&cm_connect_reqs);
34987+ atomic_inc_unchecked(&cm_connect_reqs);
34988 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34989 cm_node, cm_id, jiffies);
34990
34991@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
34992 return;
34993 cm_id = cm_node->cm_id;
34994
34995- atomic_inc(&cm_connect_reqs);
34996+ atomic_inc_unchecked(&cm_connect_reqs);
34997 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34998 cm_node, cm_id, jiffies);
34999
35000diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35001index e593af3..870694a 100644
35002--- a/drivers/infiniband/hw/nes/nes_nic.c
35003+++ b/drivers/infiniband/hw/nes/nes_nic.c
35004@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35005 target_stat_values[++index] = mh_detected;
35006 target_stat_values[++index] = mh_pauses_sent;
35007 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35008- target_stat_values[++index] = atomic_read(&cm_connects);
35009- target_stat_values[++index] = atomic_read(&cm_accepts);
35010- target_stat_values[++index] = atomic_read(&cm_disconnects);
35011- target_stat_values[++index] = atomic_read(&cm_connecteds);
35012- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35013- target_stat_values[++index] = atomic_read(&cm_rejects);
35014- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35015- target_stat_values[++index] = atomic_read(&qps_created);
35016- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35017- target_stat_values[++index] = atomic_read(&qps_destroyed);
35018- target_stat_values[++index] = atomic_read(&cm_closes);
35019+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35020+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35021+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35022+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35023+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35024+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35025+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35026+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35027+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35028+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35029+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35030 target_stat_values[++index] = cm_packets_sent;
35031 target_stat_values[++index] = cm_packets_bounced;
35032 target_stat_values[++index] = cm_packets_created;
35033@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35034 target_stat_values[++index] = cm_listens_created;
35035 target_stat_values[++index] = cm_listens_destroyed;
35036 target_stat_values[++index] = cm_backlog_drops;
35037- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35038- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35039- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35040- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35041- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35042+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35043+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35044+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35045+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35046+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35047 target_stat_values[++index] = int_mod_timer_init;
35048 target_stat_values[++index] = int_mod_cq_depth_1;
35049 target_stat_values[++index] = int_mod_cq_depth_4;
35050diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35051index a680c42..f914deb 100644
35052--- a/drivers/infiniband/hw/nes/nes_verbs.c
35053+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35054@@ -45,9 +45,9 @@
35055
35056 #include <rdma/ib_umem.h>
35057
35058-atomic_t mod_qp_timouts;
35059-atomic_t qps_created;
35060-atomic_t sw_qps_destroyed;
35061+atomic_unchecked_t mod_qp_timouts;
35062+atomic_unchecked_t qps_created;
35063+atomic_unchecked_t sw_qps_destroyed;
35064
35065 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35066
35067@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35068 if (init_attr->create_flags)
35069 return ERR_PTR(-EINVAL);
35070
35071- atomic_inc(&qps_created);
35072+ atomic_inc_unchecked(&qps_created);
35073 switch (init_attr->qp_type) {
35074 case IB_QPT_RC:
35075 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35076@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35077 struct iw_cm_event cm_event;
35078 int ret;
35079
35080- atomic_inc(&sw_qps_destroyed);
35081+ atomic_inc_unchecked(&sw_qps_destroyed);
35082 nesqp->destroyed = 1;
35083
35084 /* Blow away the connection if it exists. */
35085diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35086index ac11be0..3883c04 100644
35087--- a/drivers/input/gameport/gameport.c
35088+++ b/drivers/input/gameport/gameport.c
35089@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35090 */
35091 static void gameport_init_port(struct gameport *gameport)
35092 {
35093- static atomic_t gameport_no = ATOMIC_INIT(0);
35094+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35095
35096 __module_get(THIS_MODULE);
35097
35098 mutex_init(&gameport->drv_mutex);
35099 device_initialize(&gameport->dev);
35100- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35101+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35102 gameport->dev.bus = &gameport_bus;
35103 gameport->dev.release = gameport_release_port;
35104 if (gameport->parent)
35105diff --git a/drivers/input/input.c b/drivers/input/input.c
35106index c82ae82..8cfb9cb 100644
35107--- a/drivers/input/input.c
35108+++ b/drivers/input/input.c
35109@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35110 */
35111 int input_register_device(struct input_dev *dev)
35112 {
35113- static atomic_t input_no = ATOMIC_INIT(0);
35114+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35115 struct input_handler *handler;
35116 const char *path;
35117 int error;
35118@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35119 dev->setkeycode = input_default_setkeycode;
35120
35121 dev_set_name(&dev->dev, "input%ld",
35122- (unsigned long) atomic_inc_return(&input_no) - 1);
35123+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35124
35125 error = device_add(&dev->dev);
35126 if (error)
35127diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35128index ca13a6b..b032b0c 100644
35129--- a/drivers/input/joystick/sidewinder.c
35130+++ b/drivers/input/joystick/sidewinder.c
35131@@ -30,6 +30,7 @@
35132 #include <linux/kernel.h>
35133 #include <linux/module.h>
35134 #include <linux/slab.h>
35135+#include <linux/sched.h>
35136 #include <linux/init.h>
35137 #include <linux/input.h>
35138 #include <linux/gameport.h>
35139@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35140 unsigned char buf[SW_LENGTH];
35141 int i;
35142
35143+ pax_track_stack();
35144+
35145 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35146
35147 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35148diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35149index 79e3edc..01412b9 100644
35150--- a/drivers/input/joystick/xpad.c
35151+++ b/drivers/input/joystick/xpad.c
35152@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35153
35154 static int xpad_led_probe(struct usb_xpad *xpad)
35155 {
35156- static atomic_t led_seq = ATOMIC_INIT(0);
35157+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35158 long led_no;
35159 struct xpad_led *led;
35160 struct led_classdev *led_cdev;
35161@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35162 if (!led)
35163 return -ENOMEM;
35164
35165- led_no = (long)atomic_inc_return(&led_seq) - 1;
35166+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35167
35168 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35169 led->xpad = xpad;
35170diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35171index 0236f0d..c7327f1 100644
35172--- a/drivers/input/serio/serio.c
35173+++ b/drivers/input/serio/serio.c
35174@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35175 */
35176 static void serio_init_port(struct serio *serio)
35177 {
35178- static atomic_t serio_no = ATOMIC_INIT(0);
35179+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35180
35181 __module_get(THIS_MODULE);
35182
35183@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35184 mutex_init(&serio->drv_mutex);
35185 device_initialize(&serio->dev);
35186 dev_set_name(&serio->dev, "serio%ld",
35187- (long)atomic_inc_return(&serio_no) - 1);
35188+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35189 serio->dev.bus = &serio_bus;
35190 serio->dev.release = serio_release_port;
35191 if (serio->parent) {
35192diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35193index 33dcd8d..2783d25 100644
35194--- a/drivers/isdn/gigaset/common.c
35195+++ b/drivers/isdn/gigaset/common.c
35196@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35197 cs->commands_pending = 0;
35198 cs->cur_at_seq = 0;
35199 cs->gotfwver = -1;
35200- cs->open_count = 0;
35201+ local_set(&cs->open_count, 0);
35202 cs->dev = NULL;
35203 cs->tty = NULL;
35204 cs->tty_dev = NULL;
35205diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35206index a2f6125..6a70677 100644
35207--- a/drivers/isdn/gigaset/gigaset.h
35208+++ b/drivers/isdn/gigaset/gigaset.h
35209@@ -34,6 +34,7 @@
35210 #include <linux/tty_driver.h>
35211 #include <linux/list.h>
35212 #include <asm/atomic.h>
35213+#include <asm/local.h>
35214
35215 #define GIG_VERSION {0,5,0,0}
35216 #define GIG_COMPAT {0,4,0,0}
35217@@ -446,7 +447,7 @@ struct cardstate {
35218 spinlock_t cmdlock;
35219 unsigned curlen, cmdbytes;
35220
35221- unsigned open_count;
35222+ local_t open_count;
35223 struct tty_struct *tty;
35224 struct tasklet_struct if_wake_tasklet;
35225 unsigned control_state;
35226diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35227index b3065b8..c7e8cc9 100644
35228--- a/drivers/isdn/gigaset/interface.c
35229+++ b/drivers/isdn/gigaset/interface.c
35230@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35231 return -ERESTARTSYS; // FIXME -EINTR?
35232 tty->driver_data = cs;
35233
35234- ++cs->open_count;
35235-
35236- if (cs->open_count == 1) {
35237+ if (local_inc_return(&cs->open_count) == 1) {
35238 spin_lock_irqsave(&cs->lock, flags);
35239 cs->tty = tty;
35240 spin_unlock_irqrestore(&cs->lock, flags);
35241@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35242
35243 if (!cs->connected)
35244 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35245- else if (!cs->open_count)
35246+ else if (!local_read(&cs->open_count))
35247 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35248 else {
35249- if (!--cs->open_count) {
35250+ if (!local_dec_return(&cs->open_count)) {
35251 spin_lock_irqsave(&cs->lock, flags);
35252 cs->tty = NULL;
35253 spin_unlock_irqrestore(&cs->lock, flags);
35254@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35255 if (!cs->connected) {
35256 gig_dbg(DEBUG_IF, "not connected");
35257 retval = -ENODEV;
35258- } else if (!cs->open_count)
35259+ } else if (!local_read(&cs->open_count))
35260 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35261 else {
35262 retval = 0;
35263@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35264 if (!cs->connected) {
35265 gig_dbg(DEBUG_IF, "not connected");
35266 retval = -ENODEV;
35267- } else if (!cs->open_count)
35268+ } else if (!local_read(&cs->open_count))
35269 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35270 else if (cs->mstate != MS_LOCKED) {
35271 dev_warn(cs->dev, "can't write to unlocked device\n");
35272@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35273 if (!cs->connected) {
35274 gig_dbg(DEBUG_IF, "not connected");
35275 retval = -ENODEV;
35276- } else if (!cs->open_count)
35277+ } else if (!local_read(&cs->open_count))
35278 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35279 else if (cs->mstate != MS_LOCKED) {
35280 dev_warn(cs->dev, "can't write to unlocked device\n");
35281@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35282
35283 if (!cs->connected)
35284 gig_dbg(DEBUG_IF, "not connected");
35285- else if (!cs->open_count)
35286+ else if (!local_read(&cs->open_count))
35287 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35288 else if (cs->mstate != MS_LOCKED)
35289 dev_warn(cs->dev, "can't write to unlocked device\n");
35290@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35291
35292 if (!cs->connected)
35293 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35294- else if (!cs->open_count)
35295+ else if (!local_read(&cs->open_count))
35296 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35297 else {
35298 //FIXME
35299@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35300
35301 if (!cs->connected)
35302 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35303- else if (!cs->open_count)
35304+ else if (!local_read(&cs->open_count))
35305 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35306 else {
35307 //FIXME
35308@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35309 goto out;
35310 }
35311
35312- if (!cs->open_count) {
35313+ if (!local_read(&cs->open_count)) {
35314 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35315 goto out;
35316 }
35317diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35318index a7c0083..62a7cb6 100644
35319--- a/drivers/isdn/hardware/avm/b1.c
35320+++ b/drivers/isdn/hardware/avm/b1.c
35321@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35322 }
35323 if (left) {
35324 if (t4file->user) {
35325- if (copy_from_user(buf, dp, left))
35326+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35327 return -EFAULT;
35328 } else {
35329 memcpy(buf, dp, left);
35330@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35331 }
35332 if (left) {
35333 if (config->user) {
35334- if (copy_from_user(buf, dp, left))
35335+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35336 return -EFAULT;
35337 } else {
35338 memcpy(buf, dp, left);
35339diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35340index f130724..c373c68 100644
35341--- a/drivers/isdn/hardware/eicon/capidtmf.c
35342+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35343@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35344 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35345 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35346
35347+ pax_track_stack();
35348
35349 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35350 {
35351diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35352index 4d425c6..a9be6c4 100644
35353--- a/drivers/isdn/hardware/eicon/capifunc.c
35354+++ b/drivers/isdn/hardware/eicon/capifunc.c
35355@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35356 IDI_SYNC_REQ req;
35357 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35358
35359+ pax_track_stack();
35360+
35361 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35362
35363 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35364diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35365index 3029234..ef0d9e2 100644
35366--- a/drivers/isdn/hardware/eicon/diddfunc.c
35367+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35368@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35369 IDI_SYNC_REQ req;
35370 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35371
35372+ pax_track_stack();
35373+
35374 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35375
35376 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35377diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35378index d36a4c0..11e7d1a 100644
35379--- a/drivers/isdn/hardware/eicon/divasfunc.c
35380+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35381@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35382 IDI_SYNC_REQ req;
35383 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35384
35385+ pax_track_stack();
35386+
35387 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35388
35389 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35390diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35391index 85784a7..a19ca98 100644
35392--- a/drivers/isdn/hardware/eicon/divasync.h
35393+++ b/drivers/isdn/hardware/eicon/divasync.h
35394@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35395 } diva_didd_add_adapter_t;
35396 typedef struct _diva_didd_remove_adapter {
35397 IDI_CALL p_request;
35398-} diva_didd_remove_adapter_t;
35399+} __no_const diva_didd_remove_adapter_t;
35400 typedef struct _diva_didd_read_adapter_array {
35401 void * buffer;
35402 dword length;
35403diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35404index db87d51..7d09acf 100644
35405--- a/drivers/isdn/hardware/eicon/idifunc.c
35406+++ b/drivers/isdn/hardware/eicon/idifunc.c
35407@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35408 IDI_SYNC_REQ req;
35409 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35410
35411+ pax_track_stack();
35412+
35413 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35414
35415 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35416diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35417index ae89fb8..0fab299 100644
35418--- a/drivers/isdn/hardware/eicon/message.c
35419+++ b/drivers/isdn/hardware/eicon/message.c
35420@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35421 dword d;
35422 word w;
35423
35424+ pax_track_stack();
35425+
35426 a = plci->adapter;
35427 Id = ((word)plci->Id<<8)|a->Id;
35428 PUT_WORD(&SS_Ind[4],0x0000);
35429@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35430 word j, n, w;
35431 dword d;
35432
35433+ pax_track_stack();
35434+
35435
35436 for(i=0;i<8;i++) bp_parms[i].length = 0;
35437 for(i=0;i<2;i++) global_config[i].length = 0;
35438@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35439 const byte llc3[] = {4,3,2,2,6,6,0};
35440 const byte header[] = {0,2,3,3,0,0,0};
35441
35442+ pax_track_stack();
35443+
35444 for(i=0;i<8;i++) bp_parms[i].length = 0;
35445 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35446 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35447@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35448 word appl_number_group_type[MAX_APPL];
35449 PLCI *auxplci;
35450
35451+ pax_track_stack();
35452+
35453 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35454
35455 if(!a->group_optimization_enabled)
35456diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35457index a564b75..f3cf8b5 100644
35458--- a/drivers/isdn/hardware/eicon/mntfunc.c
35459+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35460@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35461 IDI_SYNC_REQ req;
35462 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35463
35464+ pax_track_stack();
35465+
35466 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35467
35468 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35469diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35470index a3bd163..8956575 100644
35471--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35472+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35473@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35474 typedef struct _diva_os_idi_adapter_interface {
35475 diva_init_card_proc_t cleanup_adapter_proc;
35476 diva_cmd_card_proc_t cmd_proc;
35477-} diva_os_idi_adapter_interface_t;
35478+} __no_const diva_os_idi_adapter_interface_t;
35479
35480 typedef struct _diva_os_xdi_adapter {
35481 struct list_head link;
35482diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35483index adb1e8c..21b590b 100644
35484--- a/drivers/isdn/i4l/isdn_common.c
35485+++ b/drivers/isdn/i4l/isdn_common.c
35486@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35487 } iocpar;
35488 void __user *argp = (void __user *)arg;
35489
35490+ pax_track_stack();
35491+
35492 #define name iocpar.name
35493 #define bname iocpar.bname
35494 #define iocts iocpar.iocts
35495diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35496index bf7997a..cf091db 100644
35497--- a/drivers/isdn/icn/icn.c
35498+++ b/drivers/isdn/icn/icn.c
35499@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35500 if (count > len)
35501 count = len;
35502 if (user) {
35503- if (copy_from_user(msg, buf, count))
35504+ if (count > sizeof msg || copy_from_user(msg, buf, count))
35505 return -EFAULT;
35506 } else
35507 memcpy(msg, buf, count);
35508diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35509index feb0fa4..f76f830 100644
35510--- a/drivers/isdn/mISDN/socket.c
35511+++ b/drivers/isdn/mISDN/socket.c
35512@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35513 if (dev) {
35514 struct mISDN_devinfo di;
35515
35516+ memset(&di, 0, sizeof(di));
35517 di.id = dev->id;
35518 di.Dprotocols = dev->Dprotocols;
35519 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35520@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35521 if (dev) {
35522 struct mISDN_devinfo di;
35523
35524+ memset(&di, 0, sizeof(di));
35525 di.id = dev->id;
35526 di.Dprotocols = dev->Dprotocols;
35527 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35528diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35529index 485be8b..f0225bc 100644
35530--- a/drivers/isdn/sc/interrupt.c
35531+++ b/drivers/isdn/sc/interrupt.c
35532@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35533 }
35534 else if(callid>=0x0000 && callid<=0x7FFF)
35535 {
35536+ int len;
35537+
35538 pr_debug("%s: Got Incoming Call\n",
35539 sc_adapter[card]->devicename);
35540- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35541- strcpy(setup.eazmsn,
35542- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35543+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35544+ sizeof(setup.phone));
35545+ if (len >= sizeof(setup.phone))
35546+ continue;
35547+ len = strlcpy(setup.eazmsn,
35548+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35549+ sizeof(setup.eazmsn));
35550+ if (len >= sizeof(setup.eazmsn))
35551+ continue;
35552 setup.si1 = 7;
35553 setup.si2 = 0;
35554 setup.plan = 0;
35555@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35556 * Handle a GetMyNumber Rsp
35557 */
35558 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35559- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35560+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35561+ rcvmsg.msg_data.byte_array,
35562+ sizeof(rcvmsg.msg_data.byte_array));
35563 continue;
35564 }
35565
35566diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35567index 8744d24..d1f9a9a 100644
35568--- a/drivers/lguest/core.c
35569+++ b/drivers/lguest/core.c
35570@@ -91,9 +91,17 @@ static __init int map_switcher(void)
35571 * it's worked so far. The end address needs +1 because __get_vm_area
35572 * allocates an extra guard page, so we need space for that.
35573 */
35574+
35575+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35576+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35577+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35578+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35579+#else
35580 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35581 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35582 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35583+#endif
35584+
35585 if (!switcher_vma) {
35586 err = -ENOMEM;
35587 printk("lguest: could not map switcher pages high\n");
35588@@ -118,7 +126,7 @@ static __init int map_switcher(void)
35589 * Now the Switcher is mapped at the right address, we can't fail!
35590 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35591 */
35592- memcpy(switcher_vma->addr, start_switcher_text,
35593+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35594 end_switcher_text - start_switcher_text);
35595
35596 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35597diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35598index 6ae3888..8b38145 100644
35599--- a/drivers/lguest/x86/core.c
35600+++ b/drivers/lguest/x86/core.c
35601@@ -59,7 +59,7 @@ static struct {
35602 /* Offset from where switcher.S was compiled to where we've copied it */
35603 static unsigned long switcher_offset(void)
35604 {
35605- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35606+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35607 }
35608
35609 /* This cpu's struct lguest_pages. */
35610@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35611 * These copies are pretty cheap, so we do them unconditionally: */
35612 /* Save the current Host top-level page directory.
35613 */
35614+
35615+#ifdef CONFIG_PAX_PER_CPU_PGD
35616+ pages->state.host_cr3 = read_cr3();
35617+#else
35618 pages->state.host_cr3 = __pa(current->mm->pgd);
35619+#endif
35620+
35621 /*
35622 * Set up the Guest's page tables to see this CPU's pages (and no
35623 * other CPU's pages).
35624@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35625 * compiled-in switcher code and the high-mapped copy we just made.
35626 */
35627 for (i = 0; i < IDT_ENTRIES; i++)
35628- default_idt_entries[i] += switcher_offset();
35629+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35630
35631 /*
35632 * Set up the Switcher's per-cpu areas.
35633@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35634 * it will be undisturbed when we switch. To change %cs and jump we
35635 * need this structure to feed to Intel's "lcall" instruction.
35636 */
35637- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35638+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35639 lguest_entry.segment = LGUEST_CS;
35640
35641 /*
35642diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35643index 40634b0..4f5855e 100644
35644--- a/drivers/lguest/x86/switcher_32.S
35645+++ b/drivers/lguest/x86/switcher_32.S
35646@@ -87,6 +87,7 @@
35647 #include <asm/page.h>
35648 #include <asm/segment.h>
35649 #include <asm/lguest.h>
35650+#include <asm/processor-flags.h>
35651
35652 // We mark the start of the code to copy
35653 // It's placed in .text tho it's never run here
35654@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35655 // Changes type when we load it: damn Intel!
35656 // For after we switch over our page tables
35657 // That entry will be read-only: we'd crash.
35658+
35659+#ifdef CONFIG_PAX_KERNEXEC
35660+ mov %cr0, %edx
35661+ xor $X86_CR0_WP, %edx
35662+ mov %edx, %cr0
35663+#endif
35664+
35665 movl $(GDT_ENTRY_TSS*8), %edx
35666 ltr %dx
35667
35668@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35669 // Let's clear it again for our return.
35670 // The GDT descriptor of the Host
35671 // Points to the table after two "size" bytes
35672- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35673+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35674 // Clear "used" from type field (byte 5, bit 2)
35675- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35676+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35677+
35678+#ifdef CONFIG_PAX_KERNEXEC
35679+ mov %cr0, %eax
35680+ xor $X86_CR0_WP, %eax
35681+ mov %eax, %cr0
35682+#endif
35683
35684 // Once our page table's switched, the Guest is live!
35685 // The Host fades as we run this final step.
35686@@ -295,13 +309,12 @@ deliver_to_host:
35687 // I consulted gcc, and it gave
35688 // These instructions, which I gladly credit:
35689 leal (%edx,%ebx,8), %eax
35690- movzwl (%eax),%edx
35691- movl 4(%eax), %eax
35692- xorw %ax, %ax
35693- orl %eax, %edx
35694+ movl 4(%eax), %edx
35695+ movw (%eax), %dx
35696 // Now the address of the handler's in %edx
35697 // We call it now: its "iret" drops us home.
35698- jmp *%edx
35699+ ljmp $__KERNEL_CS, $1f
35700+1: jmp *%edx
35701
35702 // Every interrupt can come to us here
35703 // But we must truly tell each apart.
35704diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
35705index 588a5b0..b71db89 100644
35706--- a/drivers/macintosh/macio_asic.c
35707+++ b/drivers/macintosh/macio_asic.c
35708@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
35709 * MacIO is matched against any Apple ID, it's probe() function
35710 * will then decide wether it applies or not
35711 */
35712-static const struct pci_device_id __devinitdata pci_ids [] = { {
35713+static const struct pci_device_id __devinitconst pci_ids [] = { {
35714 .vendor = PCI_VENDOR_ID_APPLE,
35715 .device = PCI_ANY_ID,
35716 .subvendor = PCI_ANY_ID,
35717diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
35718index a348bb0..ecd9b3f 100644
35719--- a/drivers/macintosh/via-pmu-backlight.c
35720+++ b/drivers/macintosh/via-pmu-backlight.c
35721@@ -15,7 +15,7 @@
35722
35723 #define MAX_PMU_LEVEL 0xFF
35724
35725-static struct backlight_ops pmu_backlight_data;
35726+static const struct backlight_ops pmu_backlight_data;
35727 static DEFINE_SPINLOCK(pmu_backlight_lock);
35728 static int sleeping, uses_pmu_bl;
35729 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
35730@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
35731 return bd->props.brightness;
35732 }
35733
35734-static struct backlight_ops pmu_backlight_data = {
35735+static const struct backlight_ops pmu_backlight_data = {
35736 .get_brightness = pmu_backlight_get_brightness,
35737 .update_status = pmu_backlight_update_status,
35738
35739diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
35740index 6f308a4..b5f7ff7 100644
35741--- a/drivers/macintosh/via-pmu.c
35742+++ b/drivers/macintosh/via-pmu.c
35743@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
35744 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
35745 }
35746
35747-static struct platform_suspend_ops pmu_pm_ops = {
35748+static const struct platform_suspend_ops pmu_pm_ops = {
35749 .enter = powerbook_sleep,
35750 .valid = pmu_sleep_valid,
35751 };
35752diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
35753index 818b617..4656e38 100644
35754--- a/drivers/md/dm-ioctl.c
35755+++ b/drivers/md/dm-ioctl.c
35756@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
35757 cmd == DM_LIST_VERSIONS_CMD)
35758 return 0;
35759
35760- if ((cmd == DM_DEV_CREATE_CMD)) {
35761+ if (cmd == DM_DEV_CREATE_CMD) {
35762 if (!*param->name) {
35763 DMWARN("name not supplied when creating device");
35764 return -EINVAL;
35765diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
35766index 6021d0a..a878643 100644
35767--- a/drivers/md/dm-raid1.c
35768+++ b/drivers/md/dm-raid1.c
35769@@ -41,7 +41,7 @@ enum dm_raid1_error {
35770
35771 struct mirror {
35772 struct mirror_set *ms;
35773- atomic_t error_count;
35774+ atomic_unchecked_t error_count;
35775 unsigned long error_type;
35776 struct dm_dev *dev;
35777 sector_t offset;
35778@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35779 * simple way to tell if a device has encountered
35780 * errors.
35781 */
35782- atomic_inc(&m->error_count);
35783+ atomic_inc_unchecked(&m->error_count);
35784
35785 if (test_and_set_bit(error_type, &m->error_type))
35786 return;
35787@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35788 }
35789
35790 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
35791- if (!atomic_read(&new->error_count)) {
35792+ if (!atomic_read_unchecked(&new->error_count)) {
35793 set_default_mirror(new);
35794 break;
35795 }
35796@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
35797 struct mirror *m = get_default_mirror(ms);
35798
35799 do {
35800- if (likely(!atomic_read(&m->error_count)))
35801+ if (likely(!atomic_read_unchecked(&m->error_count)))
35802 return m;
35803
35804 if (m-- == ms->mirror)
35805@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
35806 {
35807 struct mirror *default_mirror = get_default_mirror(m->ms);
35808
35809- return !atomic_read(&default_mirror->error_count);
35810+ return !atomic_read_unchecked(&default_mirror->error_count);
35811 }
35812
35813 static int mirror_available(struct mirror_set *ms, struct bio *bio)
35814@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
35815 */
35816 if (likely(region_in_sync(ms, region, 1)))
35817 m = choose_mirror(ms, bio->bi_sector);
35818- else if (m && atomic_read(&m->error_count))
35819+ else if (m && atomic_read_unchecked(&m->error_count))
35820 m = NULL;
35821
35822 if (likely(m))
35823@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
35824 }
35825
35826 ms->mirror[mirror].ms = ms;
35827- atomic_set(&(ms->mirror[mirror].error_count), 0);
35828+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
35829 ms->mirror[mirror].error_type = 0;
35830 ms->mirror[mirror].offset = offset;
35831
35832@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
35833 */
35834 static char device_status_char(struct mirror *m)
35835 {
35836- if (!atomic_read(&(m->error_count)))
35837+ if (!atomic_read_unchecked(&(m->error_count)))
35838 return 'A';
35839
35840 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
35841diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
35842index bd58703..9f26571 100644
35843--- a/drivers/md/dm-stripe.c
35844+++ b/drivers/md/dm-stripe.c
35845@@ -20,7 +20,7 @@ struct stripe {
35846 struct dm_dev *dev;
35847 sector_t physical_start;
35848
35849- atomic_t error_count;
35850+ atomic_unchecked_t error_count;
35851 };
35852
35853 struct stripe_c {
35854@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
35855 kfree(sc);
35856 return r;
35857 }
35858- atomic_set(&(sc->stripe[i].error_count), 0);
35859+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
35860 }
35861
35862 ti->private = sc;
35863@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
35864 DMEMIT("%d ", sc->stripes);
35865 for (i = 0; i < sc->stripes; i++) {
35866 DMEMIT("%s ", sc->stripe[i].dev->name);
35867- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
35868+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
35869 'D' : 'A';
35870 }
35871 buffer[i] = '\0';
35872@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
35873 */
35874 for (i = 0; i < sc->stripes; i++)
35875 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
35876- atomic_inc(&(sc->stripe[i].error_count));
35877- if (atomic_read(&(sc->stripe[i].error_count)) <
35878+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
35879+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
35880 DM_IO_ERROR_THRESHOLD)
35881 queue_work(kstriped, &sc->kstriped_ws);
35882 }
35883diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
35884index 4b04590..13a77b2 100644
35885--- a/drivers/md/dm-sysfs.c
35886+++ b/drivers/md/dm-sysfs.c
35887@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
35888 NULL,
35889 };
35890
35891-static struct sysfs_ops dm_sysfs_ops = {
35892+static const struct sysfs_ops dm_sysfs_ops = {
35893 .show = dm_attr_show,
35894 };
35895
35896diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
35897index 03345bb..332250d 100644
35898--- a/drivers/md/dm-table.c
35899+++ b/drivers/md/dm-table.c
35900@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
35901 if (!dev_size)
35902 return 0;
35903
35904- if ((start >= dev_size) || (start + len > dev_size)) {
35905+ if ((start >= dev_size) || (len > dev_size - start)) {
35906 DMWARN("%s: %s too small for target: "
35907 "start=%llu, len=%llu, dev_size=%llu",
35908 dm_device_name(ti->table->md), bdevname(bdev, b),
35909diff --git a/drivers/md/dm.c b/drivers/md/dm.c
35910index c988ac2..c418141 100644
35911--- a/drivers/md/dm.c
35912+++ b/drivers/md/dm.c
35913@@ -165,9 +165,9 @@ struct mapped_device {
35914 /*
35915 * Event handling.
35916 */
35917- atomic_t event_nr;
35918+ atomic_unchecked_t event_nr;
35919 wait_queue_head_t eventq;
35920- atomic_t uevent_seq;
35921+ atomic_unchecked_t uevent_seq;
35922 struct list_head uevent_list;
35923 spinlock_t uevent_lock; /* Protect access to uevent_list */
35924
35925@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
35926 rwlock_init(&md->map_lock);
35927 atomic_set(&md->holders, 1);
35928 atomic_set(&md->open_count, 0);
35929- atomic_set(&md->event_nr, 0);
35930- atomic_set(&md->uevent_seq, 0);
35931+ atomic_set_unchecked(&md->event_nr, 0);
35932+ atomic_set_unchecked(&md->uevent_seq, 0);
35933 INIT_LIST_HEAD(&md->uevent_list);
35934 spin_lock_init(&md->uevent_lock);
35935
35936@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
35937
35938 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
35939
35940- atomic_inc(&md->event_nr);
35941+ atomic_inc_unchecked(&md->event_nr);
35942 wake_up(&md->eventq);
35943 }
35944
35945@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
35946
35947 uint32_t dm_next_uevent_seq(struct mapped_device *md)
35948 {
35949- return atomic_add_return(1, &md->uevent_seq);
35950+ return atomic_add_return_unchecked(1, &md->uevent_seq);
35951 }
35952
35953 uint32_t dm_get_event_nr(struct mapped_device *md)
35954 {
35955- return atomic_read(&md->event_nr);
35956+ return atomic_read_unchecked(&md->event_nr);
35957 }
35958
35959 int dm_wait_event(struct mapped_device *md, int event_nr)
35960 {
35961 return wait_event_interruptible(md->eventq,
35962- (event_nr != atomic_read(&md->event_nr)));
35963+ (event_nr != atomic_read_unchecked(&md->event_nr)));
35964 }
35965
35966 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
35967diff --git a/drivers/md/md.c b/drivers/md/md.c
35968index 4ce6e2f..7a9530a 100644
35969--- a/drivers/md/md.c
35970+++ b/drivers/md/md.c
35971@@ -153,10 +153,10 @@ static int start_readonly;
35972 * start build, activate spare
35973 */
35974 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
35975-static atomic_t md_event_count;
35976+static atomic_unchecked_t md_event_count;
35977 void md_new_event(mddev_t *mddev)
35978 {
35979- atomic_inc(&md_event_count);
35980+ atomic_inc_unchecked(&md_event_count);
35981 wake_up(&md_event_waiters);
35982 }
35983 EXPORT_SYMBOL_GPL(md_new_event);
35984@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
35985 */
35986 static void md_new_event_inintr(mddev_t *mddev)
35987 {
35988- atomic_inc(&md_event_count);
35989+ atomic_inc_unchecked(&md_event_count);
35990 wake_up(&md_event_waiters);
35991 }
35992
35993@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
35994
35995 rdev->preferred_minor = 0xffff;
35996 rdev->data_offset = le64_to_cpu(sb->data_offset);
35997- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35998+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35999
36000 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36001 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36002@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36003 else
36004 sb->resync_offset = cpu_to_le64(0);
36005
36006- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36007+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36008
36009 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36010 sb->size = cpu_to_le64(mddev->dev_sectors);
36011@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36012 static ssize_t
36013 errors_show(mdk_rdev_t *rdev, char *page)
36014 {
36015- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36016+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36017 }
36018
36019 static ssize_t
36020@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36021 char *e;
36022 unsigned long n = simple_strtoul(buf, &e, 10);
36023 if (*buf && (*e == 0 || *e == '\n')) {
36024- atomic_set(&rdev->corrected_errors, n);
36025+ atomic_set_unchecked(&rdev->corrected_errors, n);
36026 return len;
36027 }
36028 return -EINVAL;
36029@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36030 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36031 kfree(rdev);
36032 }
36033-static struct sysfs_ops rdev_sysfs_ops = {
36034+static const struct sysfs_ops rdev_sysfs_ops = {
36035 .show = rdev_attr_show,
36036 .store = rdev_attr_store,
36037 };
36038@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36039 rdev->data_offset = 0;
36040 rdev->sb_events = 0;
36041 atomic_set(&rdev->nr_pending, 0);
36042- atomic_set(&rdev->read_errors, 0);
36043- atomic_set(&rdev->corrected_errors, 0);
36044+ atomic_set_unchecked(&rdev->read_errors, 0);
36045+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36046
36047 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36048 if (!size) {
36049@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36050 kfree(mddev);
36051 }
36052
36053-static struct sysfs_ops md_sysfs_ops = {
36054+static const struct sysfs_ops md_sysfs_ops = {
36055 .show = md_attr_show,
36056 .store = md_attr_store,
36057 };
36058@@ -4482,7 +4482,8 @@ out:
36059 err = 0;
36060 blk_integrity_unregister(disk);
36061 md_new_event(mddev);
36062- sysfs_notify_dirent(mddev->sysfs_state);
36063+ if (mddev->sysfs_state)
36064+ sysfs_notify_dirent(mddev->sysfs_state);
36065 return err;
36066 }
36067
36068@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36069
36070 spin_unlock(&pers_lock);
36071 seq_printf(seq, "\n");
36072- mi->event = atomic_read(&md_event_count);
36073+ mi->event = atomic_read_unchecked(&md_event_count);
36074 return 0;
36075 }
36076 if (v == (void*)2) {
36077@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36078 chunk_kb ? "KB" : "B");
36079 if (bitmap->file) {
36080 seq_printf(seq, ", file: ");
36081- seq_path(seq, &bitmap->file->f_path, " \t\n");
36082+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36083 }
36084
36085 seq_printf(seq, "\n");
36086@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36087 else {
36088 struct seq_file *p = file->private_data;
36089 p->private = mi;
36090- mi->event = atomic_read(&md_event_count);
36091+ mi->event = atomic_read_unchecked(&md_event_count);
36092 }
36093 return error;
36094 }
36095@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36096 /* always allow read */
36097 mask = POLLIN | POLLRDNORM;
36098
36099- if (mi->event != atomic_read(&md_event_count))
36100+ if (mi->event != atomic_read_unchecked(&md_event_count))
36101 mask |= POLLERR | POLLPRI;
36102 return mask;
36103 }
36104@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36105 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36106 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36107 (int)part_stat_read(&disk->part0, sectors[1]) -
36108- atomic_read(&disk->sync_io);
36109+ atomic_read_unchecked(&disk->sync_io);
36110 /* sync IO will cause sync_io to increase before the disk_stats
36111 * as sync_io is counted when a request starts, and
36112 * disk_stats is counted when it completes.
36113diff --git a/drivers/md/md.h b/drivers/md/md.h
36114index 87430fe..0024a4c 100644
36115--- a/drivers/md/md.h
36116+++ b/drivers/md/md.h
36117@@ -94,10 +94,10 @@ struct mdk_rdev_s
36118 * only maintained for arrays that
36119 * support hot removal
36120 */
36121- atomic_t read_errors; /* number of consecutive read errors that
36122+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36123 * we have tried to ignore.
36124 */
36125- atomic_t corrected_errors; /* number of corrected read errors,
36126+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36127 * for reporting to userspace and storing
36128 * in superblock.
36129 */
36130@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36131
36132 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36133 {
36134- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36135+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36136 }
36137
36138 struct mdk_personality
36139diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36140index 968cb14..f0ad2e4 100644
36141--- a/drivers/md/raid1.c
36142+++ b/drivers/md/raid1.c
36143@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36144 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36145 continue;
36146 rdev = conf->mirrors[d].rdev;
36147- atomic_add(s, &rdev->corrected_errors);
36148+ atomic_add_unchecked(s, &rdev->corrected_errors);
36149 if (sync_page_io(rdev->bdev,
36150 sect + rdev->data_offset,
36151 s<<9,
36152@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36153 /* Well, this device is dead */
36154 md_error(mddev, rdev);
36155 else {
36156- atomic_add(s, &rdev->corrected_errors);
36157+ atomic_add_unchecked(s, &rdev->corrected_errors);
36158 printk(KERN_INFO
36159 "raid1:%s: read error corrected "
36160 "(%d sectors at %llu on %s)\n",
36161diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36162index 1b4e232..cf0f534 100644
36163--- a/drivers/md/raid10.c
36164+++ b/drivers/md/raid10.c
36165@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36166 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36167 set_bit(R10BIO_Uptodate, &r10_bio->state);
36168 else {
36169- atomic_add(r10_bio->sectors,
36170+ atomic_add_unchecked(r10_bio->sectors,
36171 &conf->mirrors[d].rdev->corrected_errors);
36172 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36173 md_error(r10_bio->mddev,
36174@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36175 test_bit(In_sync, &rdev->flags)) {
36176 atomic_inc(&rdev->nr_pending);
36177 rcu_read_unlock();
36178- atomic_add(s, &rdev->corrected_errors);
36179+ atomic_add_unchecked(s, &rdev->corrected_errors);
36180 if (sync_page_io(rdev->bdev,
36181 r10_bio->devs[sl].addr +
36182 sect + rdev->data_offset,
36183diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36184index 883215d..675bf47 100644
36185--- a/drivers/md/raid5.c
36186+++ b/drivers/md/raid5.c
36187@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36188 bi->bi_next = NULL;
36189 if ((rw & WRITE) &&
36190 test_bit(R5_ReWrite, &sh->dev[i].flags))
36191- atomic_add(STRIPE_SECTORS,
36192+ atomic_add_unchecked(STRIPE_SECTORS,
36193 &rdev->corrected_errors);
36194 generic_make_request(bi);
36195 } else {
36196@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36197 clear_bit(R5_ReadError, &sh->dev[i].flags);
36198 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36199 }
36200- if (atomic_read(&conf->disks[i].rdev->read_errors))
36201- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36202+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36203+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36204 } else {
36205 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36206 int retry = 0;
36207 rdev = conf->disks[i].rdev;
36208
36209 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36210- atomic_inc(&rdev->read_errors);
36211+ atomic_inc_unchecked(&rdev->read_errors);
36212 if (conf->mddev->degraded >= conf->max_degraded)
36213 printk_rl(KERN_WARNING
36214 "raid5:%s: read error not correctable "
36215@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36216 (unsigned long long)(sh->sector
36217 + rdev->data_offset),
36218 bdn);
36219- else if (atomic_read(&rdev->read_errors)
36220+ else if (atomic_read_unchecked(&rdev->read_errors)
36221 > conf->max_nr_stripes)
36222 printk(KERN_WARNING
36223 "raid5:%s: Too many read errors, failing device %s.\n",
36224@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36225 sector_t r_sector;
36226 struct stripe_head sh2;
36227
36228+ pax_track_stack();
36229
36230 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36231 stripe = new_sector;
36232diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36233index 05bde9c..2f31d40 100644
36234--- a/drivers/media/common/saa7146_hlp.c
36235+++ b/drivers/media/common/saa7146_hlp.c
36236@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36237
36238 int x[32], y[32], w[32], h[32];
36239
36240+ pax_track_stack();
36241+
36242 /* clear out memory */
36243 memset(&line_list[0], 0x00, sizeof(u32)*32);
36244 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36245diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36246index cb22da5..82b686e 100644
36247--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36248+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36249@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36250 u8 buf[HOST_LINK_BUF_SIZE];
36251 int i;
36252
36253+ pax_track_stack();
36254+
36255 dprintk("%s\n", __func__);
36256
36257 /* check if we have space for a link buf in the rx_buffer */
36258@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36259 unsigned long timeout;
36260 int written;
36261
36262+ pax_track_stack();
36263+
36264 dprintk("%s\n", __func__);
36265
36266 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36267diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36268index 2fe05d0..a3289c4 100644
36269--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36270+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36271@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36272 union {
36273 dmx_ts_cb ts;
36274 dmx_section_cb sec;
36275- } cb;
36276+ } __no_const cb;
36277
36278 struct dvb_demux *demux;
36279 void *priv;
36280diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36281index 94159b9..376bd8e 100644
36282--- a/drivers/media/dvb/dvb-core/dvbdev.c
36283+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36284@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36285 const struct dvb_device *template, void *priv, int type)
36286 {
36287 struct dvb_device *dvbdev;
36288- struct file_operations *dvbdevfops;
36289+ file_operations_no_const *dvbdevfops;
36290 struct device *clsdev;
36291 int minor;
36292 int id;
36293diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36294index 2a53dd0..db8c07a 100644
36295--- a/drivers/media/dvb/dvb-usb/cxusb.c
36296+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36297@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36298 struct dib0700_adapter_state {
36299 int (*set_param_save) (struct dvb_frontend *,
36300 struct dvb_frontend_parameters *);
36301-};
36302+} __no_const;
36303
36304 static int dib7070_set_param_override(struct dvb_frontend *fe,
36305 struct dvb_frontend_parameters *fep)
36306diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36307index db7f7f7..f55e96f 100644
36308--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36309+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36310@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36311
36312 u8 buf[260];
36313
36314+ pax_track_stack();
36315+
36316 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36317 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36318
36319diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36320index 524acf5..5ffc403 100644
36321--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36322+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36323@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36324
36325 struct dib0700_adapter_state {
36326 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36327-};
36328+} __no_const;
36329
36330 /* Hauppauge Nova-T 500 (aka Bristol)
36331 * has a LNA on GPIO0 which is enabled by setting 1 */
36332diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36333index ba91735..4261d84 100644
36334--- a/drivers/media/dvb/frontends/dib3000.h
36335+++ b/drivers/media/dvb/frontends/dib3000.h
36336@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36337 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36338 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36339 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36340-};
36341+} __no_const;
36342
36343 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36344 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36345diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36346index c709ce6..b3fe620 100644
36347--- a/drivers/media/dvb/frontends/or51211.c
36348+++ b/drivers/media/dvb/frontends/or51211.c
36349@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36350 u8 tudata[585];
36351 int i;
36352
36353+ pax_track_stack();
36354+
36355 dprintk("Firmware is %zd bytes\n",fw->size);
36356
36357 /* Get eprom data */
36358diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36359index 482d0f3..ee1e202 100644
36360--- a/drivers/media/radio/radio-cadet.c
36361+++ b/drivers/media/radio/radio-cadet.c
36362@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36363 while (i < count && dev->rdsin != dev->rdsout)
36364 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36365
36366- if (copy_to_user(data, readbuf, i))
36367+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36368 return -EFAULT;
36369 return i;
36370 }
36371diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36372index 6dd51e2..0359b92 100644
36373--- a/drivers/media/video/cx18/cx18-driver.c
36374+++ b/drivers/media/video/cx18/cx18-driver.c
36375@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36376
36377 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36378
36379-static atomic_t cx18_instance = ATOMIC_INIT(0);
36380+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36381
36382 /* Parameter declarations */
36383 static int cardtype[CX18_MAX_CARDS];
36384@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36385 struct i2c_client c;
36386 u8 eedata[256];
36387
36388+ pax_track_stack();
36389+
36390 memset(&c, 0, sizeof(c));
36391 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36392 c.adapter = &cx->i2c_adap[0];
36393@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36394 struct cx18 *cx;
36395
36396 /* FIXME - module parameter arrays constrain max instances */
36397- i = atomic_inc_return(&cx18_instance) - 1;
36398+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36399 if (i >= CX18_MAX_CARDS) {
36400 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36401 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36402diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36403index 463ec34..2f4625a 100644
36404--- a/drivers/media/video/ivtv/ivtv-driver.c
36405+++ b/drivers/media/video/ivtv/ivtv-driver.c
36406@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36407 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36408
36409 /* ivtv instance counter */
36410-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36411+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36412
36413 /* Parameter declarations */
36414 static int cardtype[IVTV_MAX_CARDS];
36415diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36416index 5fc4ac0..652a54a 100644
36417--- a/drivers/media/video/omap24xxcam.c
36418+++ b/drivers/media/video/omap24xxcam.c
36419@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36420 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36421
36422 do_gettimeofday(&vb->ts);
36423- vb->field_count = atomic_add_return(2, &fh->field_count);
36424+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36425 if (csr & csr_error) {
36426 vb->state = VIDEOBUF_ERROR;
36427 if (!atomic_read(&fh->cam->in_reset)) {
36428diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36429index 2ce67f5..cf26a5b 100644
36430--- a/drivers/media/video/omap24xxcam.h
36431+++ b/drivers/media/video/omap24xxcam.h
36432@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36433 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36434 struct videobuf_queue vbq;
36435 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36436- atomic_t field_count; /* field counter for videobuf_buffer */
36437+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36438 /* accessing cam here doesn't need serialisation: it's constant */
36439 struct omap24xxcam_device *cam;
36440 };
36441diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36442index 299afa4..eb47459 100644
36443--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36444+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36445@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36446 u8 *eeprom;
36447 struct tveeprom tvdata;
36448
36449+ pax_track_stack();
36450+
36451 memset(&tvdata,0,sizeof(tvdata));
36452
36453 eeprom = pvr2_eeprom_fetch(hdw);
36454diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36455index 5b152ff..3320638 100644
36456--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36457+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36458@@ -195,7 +195,7 @@ struct pvr2_hdw {
36459
36460 /* I2C stuff */
36461 struct i2c_adapter i2c_adap;
36462- struct i2c_algorithm i2c_algo;
36463+ i2c_algorithm_no_const i2c_algo;
36464 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36465 int i2c_cx25840_hack_state;
36466 int i2c_linked;
36467diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36468index 1eabff6..8e2313a 100644
36469--- a/drivers/media/video/saa7134/saa6752hs.c
36470+++ b/drivers/media/video/saa7134/saa6752hs.c
36471@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36472 unsigned char localPAT[256];
36473 unsigned char localPMT[256];
36474
36475+ pax_track_stack();
36476+
36477 /* Set video format - must be done first as it resets other settings */
36478 set_reg8(client, 0x41, h->video_format);
36479
36480diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36481index 9c1d3ac..b1b49e9 100644
36482--- a/drivers/media/video/saa7164/saa7164-cmd.c
36483+++ b/drivers/media/video/saa7164/saa7164-cmd.c
36484@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36485 wait_queue_head_t *q = 0;
36486 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36487
36488+ pax_track_stack();
36489+
36490 /* While any outstand message on the bus exists... */
36491 do {
36492
36493@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36494 u8 tmp[512];
36495 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36496
36497+ pax_track_stack();
36498+
36499 while (loop) {
36500
36501 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36502diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36503index b085496..cde0270 100644
36504--- a/drivers/media/video/usbvideo/ibmcam.c
36505+++ b/drivers/media/video/usbvideo/ibmcam.c
36506@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36507 static int __init ibmcam_init(void)
36508 {
36509 struct usbvideo_cb cbTbl;
36510- memset(&cbTbl, 0, sizeof(cbTbl));
36511- cbTbl.probe = ibmcam_probe;
36512- cbTbl.setupOnOpen = ibmcam_setup_on_open;
36513- cbTbl.videoStart = ibmcam_video_start;
36514- cbTbl.videoStop = ibmcam_video_stop;
36515- cbTbl.processData = ibmcam_ProcessIsocData;
36516- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36517- cbTbl.adjustPicture = ibmcam_adjust_picture;
36518- cbTbl.getFPS = ibmcam_calculate_fps;
36519+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
36520+ *(void **)&cbTbl.probe = ibmcam_probe;
36521+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36522+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
36523+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36524+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36525+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36526+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36527+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36528 return usbvideo_register(
36529 &cams,
36530 MAX_IBMCAM,
36531diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36532index 31d57f2..600b735 100644
36533--- a/drivers/media/video/usbvideo/konicawc.c
36534+++ b/drivers/media/video/usbvideo/konicawc.c
36535@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36536 int error;
36537
36538 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36539- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36540+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36541
36542 cam->input = input_dev = input_allocate_device();
36543 if (!input_dev) {
36544@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36545 struct usbvideo_cb cbTbl;
36546 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36547 DRIVER_DESC "\n");
36548- memset(&cbTbl, 0, sizeof(cbTbl));
36549- cbTbl.probe = konicawc_probe;
36550- cbTbl.setupOnOpen = konicawc_setup_on_open;
36551- cbTbl.processData = konicawc_process_isoc;
36552- cbTbl.getFPS = konicawc_calculate_fps;
36553- cbTbl.setVideoMode = konicawc_set_video_mode;
36554- cbTbl.startDataPump = konicawc_start_data;
36555- cbTbl.stopDataPump = konicawc_stop_data;
36556- cbTbl.adjustPicture = konicawc_adjust_picture;
36557- cbTbl.userFree = konicawc_free_uvd;
36558+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
36559+ *(void **)&cbTbl.probe = konicawc_probe;
36560+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36561+ *(void **)&cbTbl.processData = konicawc_process_isoc;
36562+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36563+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36564+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
36565+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36566+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36567+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
36568 return usbvideo_register(
36569 &cams,
36570 MAX_CAMERAS,
36571diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36572index 803d3e4..c4d1b96 100644
36573--- a/drivers/media/video/usbvideo/quickcam_messenger.c
36574+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36575@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36576 int error;
36577
36578 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36579- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36580+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36581
36582 cam->input = input_dev = input_allocate_device();
36583 if (!input_dev) {
36584diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36585index fbd1b63..292f9f0 100644
36586--- a/drivers/media/video/usbvideo/ultracam.c
36587+++ b/drivers/media/video/usbvideo/ultracam.c
36588@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36589 {
36590 struct usbvideo_cb cbTbl;
36591 memset(&cbTbl, 0, sizeof(cbTbl));
36592- cbTbl.probe = ultracam_probe;
36593- cbTbl.setupOnOpen = ultracam_setup_on_open;
36594- cbTbl.videoStart = ultracam_video_start;
36595- cbTbl.videoStop = ultracam_video_stop;
36596- cbTbl.processData = ultracam_ProcessIsocData;
36597- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36598- cbTbl.adjustPicture = ultracam_adjust_picture;
36599- cbTbl.getFPS = ultracam_calculate_fps;
36600+ *(void **)&cbTbl.probe = ultracam_probe;
36601+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36602+ *(void **)&cbTbl.videoStart = ultracam_video_start;
36603+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
36604+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36605+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36606+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36607+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36608 return usbvideo_register(
36609 &cams,
36610 MAX_CAMERAS,
36611diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36612index dea8b32..34f6878 100644
36613--- a/drivers/media/video/usbvideo/usbvideo.c
36614+++ b/drivers/media/video/usbvideo/usbvideo.c
36615@@ -697,15 +697,15 @@ int usbvideo_register(
36616 __func__, cams, base_size, num_cams);
36617
36618 /* Copy callbacks, apply defaults for those that are not set */
36619- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36620+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36621 if (cams->cb.getFrame == NULL)
36622- cams->cb.getFrame = usbvideo_GetFrame;
36623+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36624 if (cams->cb.disconnect == NULL)
36625- cams->cb.disconnect = usbvideo_Disconnect;
36626+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36627 if (cams->cb.startDataPump == NULL)
36628- cams->cb.startDataPump = usbvideo_StartDataPump;
36629+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36630 if (cams->cb.stopDataPump == NULL)
36631- cams->cb.stopDataPump = usbvideo_StopDataPump;
36632+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36633
36634 cams->num_cameras = num_cams;
36635 cams->cam = (struct uvd *) &cams[1];
36636diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36637index c66985b..7fa143a 100644
36638--- a/drivers/media/video/usbvideo/usbvideo.h
36639+++ b/drivers/media/video/usbvideo/usbvideo.h
36640@@ -268,7 +268,7 @@ struct usbvideo_cb {
36641 int (*startDataPump)(struct uvd *uvd);
36642 void (*stopDataPump)(struct uvd *uvd);
36643 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
36644-};
36645+} __no_const;
36646
36647 struct usbvideo {
36648 int num_cameras; /* As allocated */
36649diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
36650index e0f91e4..37554ea 100644
36651--- a/drivers/media/video/usbvision/usbvision-core.c
36652+++ b/drivers/media/video/usbvision/usbvision-core.c
36653@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
36654 unsigned char rv, gv, bv;
36655 static unsigned char *Y, *U, *V;
36656
36657+ pax_track_stack();
36658+
36659 frame = usbvision->curFrame;
36660 imageSize = frame->frmwidth * frame->frmheight;
36661 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
36662diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
36663index 0d06e7c..3d17d24 100644
36664--- a/drivers/media/video/v4l2-device.c
36665+++ b/drivers/media/video/v4l2-device.c
36666@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
36667 EXPORT_SYMBOL_GPL(v4l2_device_register);
36668
36669 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
36670- atomic_t *instance)
36671+ atomic_unchecked_t *instance)
36672 {
36673- int num = atomic_inc_return(instance) - 1;
36674+ int num = atomic_inc_return_unchecked(instance) - 1;
36675 int len = strlen(basename);
36676
36677 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
36678diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
36679index 032ebae..6a3532c 100644
36680--- a/drivers/media/video/videobuf-dma-sg.c
36681+++ b/drivers/media/video/videobuf-dma-sg.c
36682@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
36683 {
36684 struct videobuf_queue q;
36685
36686+ pax_track_stack();
36687+
36688 /* Required to make generic handler to call __videobuf_alloc */
36689 q.int_ops = &sg_ops;
36690
36691diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36692index b6992b7..9fa7547 100644
36693--- a/drivers/message/fusion/mptbase.c
36694+++ b/drivers/message/fusion/mptbase.c
36695@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
36696 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36697 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36698
36699+#ifdef CONFIG_GRKERNSEC_HIDESYM
36700+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36701+ NULL, NULL);
36702+#else
36703 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36704 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36705+#endif
36706+
36707 /*
36708 * Rounding UP to nearest 4-kB boundary here...
36709 */
36710diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36711index 83873e3..e360e9a 100644
36712--- a/drivers/message/fusion/mptsas.c
36713+++ b/drivers/message/fusion/mptsas.c
36714@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36715 return 0;
36716 }
36717
36718+static inline void
36719+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36720+{
36721+ if (phy_info->port_details) {
36722+ phy_info->port_details->rphy = rphy;
36723+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36724+ ioc->name, rphy));
36725+ }
36726+
36727+ if (rphy) {
36728+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36729+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36730+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36731+ ioc->name, rphy, rphy->dev.release));
36732+ }
36733+}
36734+
36735 /* no mutex */
36736 static void
36737 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36738@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
36739 return NULL;
36740 }
36741
36742-static inline void
36743-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36744-{
36745- if (phy_info->port_details) {
36746- phy_info->port_details->rphy = rphy;
36747- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36748- ioc->name, rphy));
36749- }
36750-
36751- if (rphy) {
36752- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36753- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36754- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36755- ioc->name, rphy, rphy->dev.release));
36756- }
36757-}
36758-
36759 static inline struct sas_port *
36760 mptsas_get_port(struct mptsas_phyinfo *phy_info)
36761 {
36762diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
36763index bd096ca..332cf76 100644
36764--- a/drivers/message/fusion/mptscsih.c
36765+++ b/drivers/message/fusion/mptscsih.c
36766@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
36767
36768 h = shost_priv(SChost);
36769
36770- if (h) {
36771- if (h->info_kbuf == NULL)
36772- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36773- return h->info_kbuf;
36774- h->info_kbuf[0] = '\0';
36775+ if (!h)
36776+ return NULL;
36777
36778- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36779- h->info_kbuf[size-1] = '\0';
36780- }
36781+ if (h->info_kbuf == NULL)
36782+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36783+ return h->info_kbuf;
36784+ h->info_kbuf[0] = '\0';
36785+
36786+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36787+ h->info_kbuf[size-1] = '\0';
36788
36789 return h->info_kbuf;
36790 }
36791diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
36792index efba702..59b2c0f 100644
36793--- a/drivers/message/i2o/i2o_config.c
36794+++ b/drivers/message/i2o/i2o_config.c
36795@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
36796 struct i2o_message *msg;
36797 unsigned int iop;
36798
36799+ pax_track_stack();
36800+
36801 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
36802 return -EFAULT;
36803
36804diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
36805index 7045c45..c07b170 100644
36806--- a/drivers/message/i2o/i2o_proc.c
36807+++ b/drivers/message/i2o/i2o_proc.c
36808@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
36809 "Array Controller Device"
36810 };
36811
36812-static char *chtostr(u8 * chars, int n)
36813-{
36814- char tmp[256];
36815- tmp[0] = 0;
36816- return strncat(tmp, (char *)chars, n);
36817-}
36818-
36819 static int i2o_report_query_status(struct seq_file *seq, int block_status,
36820 char *group)
36821 {
36822@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
36823
36824 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
36825 seq_printf(seq, "%-#8x", ddm_table.module_id);
36826- seq_printf(seq, "%-29s",
36827- chtostr(ddm_table.module_name_version, 28));
36828+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
36829 seq_printf(seq, "%9d ", ddm_table.data_size);
36830 seq_printf(seq, "%8d", ddm_table.code_size);
36831
36832@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
36833
36834 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
36835 seq_printf(seq, "%-#8x", dst->module_id);
36836- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
36837- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
36838+ seq_printf(seq, "%-.28s", dst->module_name_version);
36839+ seq_printf(seq, "%-.8s", dst->date);
36840 seq_printf(seq, "%8d ", dst->module_size);
36841 seq_printf(seq, "%8d ", dst->mpb_size);
36842 seq_printf(seq, "0x%04x", dst->module_flags);
36843@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
36844 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
36845 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
36846 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
36847- seq_printf(seq, "Vendor info : %s\n",
36848- chtostr((u8 *) (work32 + 2), 16));
36849- seq_printf(seq, "Product info : %s\n",
36850- chtostr((u8 *) (work32 + 6), 16));
36851- seq_printf(seq, "Description : %s\n",
36852- chtostr((u8 *) (work32 + 10), 16));
36853- seq_printf(seq, "Product rev. : %s\n",
36854- chtostr((u8 *) (work32 + 14), 8));
36855+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
36856+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
36857+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
36858+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
36859
36860 seq_printf(seq, "Serial number : ");
36861 print_serial_number(seq, (u8 *) (work32 + 16),
36862@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
36863 }
36864
36865 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
36866- seq_printf(seq, "Module name : %s\n",
36867- chtostr(result.module_name, 24));
36868- seq_printf(seq, "Module revision : %s\n",
36869- chtostr(result.module_rev, 8));
36870+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
36871+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
36872
36873 seq_printf(seq, "Serial number : ");
36874 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
36875@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
36876 return 0;
36877 }
36878
36879- seq_printf(seq, "Device name : %s\n",
36880- chtostr(result.device_name, 64));
36881- seq_printf(seq, "Service name : %s\n",
36882- chtostr(result.service_name, 64));
36883- seq_printf(seq, "Physical name : %s\n",
36884- chtostr(result.physical_location, 64));
36885- seq_printf(seq, "Instance number : %s\n",
36886- chtostr(result.instance_number, 4));
36887+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
36888+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
36889+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
36890+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
36891
36892 return 0;
36893 }
36894diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
36895index 27cf4af..b1205b8 100644
36896--- a/drivers/message/i2o/iop.c
36897+++ b/drivers/message/i2o/iop.c
36898@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
36899
36900 spin_lock_irqsave(&c->context_list_lock, flags);
36901
36902- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
36903- atomic_inc(&c->context_list_counter);
36904+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
36905+ atomic_inc_unchecked(&c->context_list_counter);
36906
36907- entry->context = atomic_read(&c->context_list_counter);
36908+ entry->context = atomic_read_unchecked(&c->context_list_counter);
36909
36910 list_add(&entry->list, &c->context_list);
36911
36912@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
36913
36914 #if BITS_PER_LONG == 64
36915 spin_lock_init(&c->context_list_lock);
36916- atomic_set(&c->context_list_counter, 0);
36917+ atomic_set_unchecked(&c->context_list_counter, 0);
36918 INIT_LIST_HEAD(&c->context_list);
36919 #endif
36920
36921diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
36922index 78e3e85..66c9a0d 100644
36923--- a/drivers/mfd/ab3100-core.c
36924+++ b/drivers/mfd/ab3100-core.c
36925@@ -777,7 +777,7 @@ struct ab_family_id {
36926 char *name;
36927 };
36928
36929-static const struct ab_family_id ids[] __initdata = {
36930+static const struct ab_family_id ids[] __initconst = {
36931 /* AB3100 */
36932 {
36933 .id = 0xc0,
36934diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
36935index 8d8c932..8104515 100644
36936--- a/drivers/mfd/wm8350-i2c.c
36937+++ b/drivers/mfd/wm8350-i2c.c
36938@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
36939 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
36940 int ret;
36941
36942+ pax_track_stack();
36943+
36944 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
36945 return -EINVAL;
36946
36947diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
36948index e4ff50b..4cc3f04 100644
36949--- a/drivers/misc/kgdbts.c
36950+++ b/drivers/misc/kgdbts.c
36951@@ -118,7 +118,7 @@
36952 } while (0)
36953 #define MAX_CONFIG_LEN 40
36954
36955-static struct kgdb_io kgdbts_io_ops;
36956+static const struct kgdb_io kgdbts_io_ops;
36957 static char get_buf[BUFMAX];
36958 static int get_buf_cnt;
36959 static char put_buf[BUFMAX];
36960@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
36961 module_put(THIS_MODULE);
36962 }
36963
36964-static struct kgdb_io kgdbts_io_ops = {
36965+static const struct kgdb_io kgdbts_io_ops = {
36966 .name = "kgdbts",
36967 .read_char = kgdbts_get_char,
36968 .write_char = kgdbts_put_char,
36969diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
36970index 37e7cfc..67cfb76 100644
36971--- a/drivers/misc/sgi-gru/gruhandles.c
36972+++ b/drivers/misc/sgi-gru/gruhandles.c
36973@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
36974
36975 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
36976 {
36977- atomic_long_inc(&mcs_op_statistics[op].count);
36978- atomic_long_add(clks, &mcs_op_statistics[op].total);
36979+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
36980+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
36981 if (mcs_op_statistics[op].max < clks)
36982 mcs_op_statistics[op].max = clks;
36983 }
36984diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
36985index 3f2375c..467c6e6 100644
36986--- a/drivers/misc/sgi-gru/gruprocfs.c
36987+++ b/drivers/misc/sgi-gru/gruprocfs.c
36988@@ -32,9 +32,9 @@
36989
36990 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
36991
36992-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
36993+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
36994 {
36995- unsigned long val = atomic_long_read(v);
36996+ unsigned long val = atomic_long_read_unchecked(v);
36997
36998 if (val)
36999 seq_printf(s, "%16lu %s\n", val, id);
37000@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37001 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37002
37003 for (op = 0; op < mcsop_last; op++) {
37004- count = atomic_long_read(&mcs_op_statistics[op].count);
37005- total = atomic_long_read(&mcs_op_statistics[op].total);
37006+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37007+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37008 max = mcs_op_statistics[op].max;
37009 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37010 count ? total / count : 0, max);
37011diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37012index 46990bc..4a251b5 100644
37013--- a/drivers/misc/sgi-gru/grutables.h
37014+++ b/drivers/misc/sgi-gru/grutables.h
37015@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37016 * GRU statistics.
37017 */
37018 struct gru_stats_s {
37019- atomic_long_t vdata_alloc;
37020- atomic_long_t vdata_free;
37021- atomic_long_t gts_alloc;
37022- atomic_long_t gts_free;
37023- atomic_long_t vdata_double_alloc;
37024- atomic_long_t gts_double_allocate;
37025- atomic_long_t assign_context;
37026- atomic_long_t assign_context_failed;
37027- atomic_long_t free_context;
37028- atomic_long_t load_user_context;
37029- atomic_long_t load_kernel_context;
37030- atomic_long_t lock_kernel_context;
37031- atomic_long_t unlock_kernel_context;
37032- atomic_long_t steal_user_context;
37033- atomic_long_t steal_kernel_context;
37034- atomic_long_t steal_context_failed;
37035- atomic_long_t nopfn;
37036- atomic_long_t break_cow;
37037- atomic_long_t asid_new;
37038- atomic_long_t asid_next;
37039- atomic_long_t asid_wrap;
37040- atomic_long_t asid_reuse;
37041- atomic_long_t intr;
37042- atomic_long_t intr_mm_lock_failed;
37043- atomic_long_t call_os;
37044- atomic_long_t call_os_offnode_reference;
37045- atomic_long_t call_os_check_for_bug;
37046- atomic_long_t call_os_wait_queue;
37047- atomic_long_t user_flush_tlb;
37048- atomic_long_t user_unload_context;
37049- atomic_long_t user_exception;
37050- atomic_long_t set_context_option;
37051- atomic_long_t migrate_check;
37052- atomic_long_t migrated_retarget;
37053- atomic_long_t migrated_unload;
37054- atomic_long_t migrated_unload_delay;
37055- atomic_long_t migrated_nopfn_retarget;
37056- atomic_long_t migrated_nopfn_unload;
37057- atomic_long_t tlb_dropin;
37058- atomic_long_t tlb_dropin_fail_no_asid;
37059- atomic_long_t tlb_dropin_fail_upm;
37060- atomic_long_t tlb_dropin_fail_invalid;
37061- atomic_long_t tlb_dropin_fail_range_active;
37062- atomic_long_t tlb_dropin_fail_idle;
37063- atomic_long_t tlb_dropin_fail_fmm;
37064- atomic_long_t tlb_dropin_fail_no_exception;
37065- atomic_long_t tlb_dropin_fail_no_exception_war;
37066- atomic_long_t tfh_stale_on_fault;
37067- atomic_long_t mmu_invalidate_range;
37068- atomic_long_t mmu_invalidate_page;
37069- atomic_long_t mmu_clear_flush_young;
37070- atomic_long_t flush_tlb;
37071- atomic_long_t flush_tlb_gru;
37072- atomic_long_t flush_tlb_gru_tgh;
37073- atomic_long_t flush_tlb_gru_zero_asid;
37074-
37075- atomic_long_t copy_gpa;
37076-
37077- atomic_long_t mesq_receive;
37078- atomic_long_t mesq_receive_none;
37079- atomic_long_t mesq_send;
37080- atomic_long_t mesq_send_failed;
37081- atomic_long_t mesq_noop;
37082- atomic_long_t mesq_send_unexpected_error;
37083- atomic_long_t mesq_send_lb_overflow;
37084- atomic_long_t mesq_send_qlimit_reached;
37085- atomic_long_t mesq_send_amo_nacked;
37086- atomic_long_t mesq_send_put_nacked;
37087- atomic_long_t mesq_qf_not_full;
37088- atomic_long_t mesq_qf_locked;
37089- atomic_long_t mesq_qf_noop_not_full;
37090- atomic_long_t mesq_qf_switch_head_failed;
37091- atomic_long_t mesq_qf_unexpected_error;
37092- atomic_long_t mesq_noop_unexpected_error;
37093- atomic_long_t mesq_noop_lb_overflow;
37094- atomic_long_t mesq_noop_qlimit_reached;
37095- atomic_long_t mesq_noop_amo_nacked;
37096- atomic_long_t mesq_noop_put_nacked;
37097+ atomic_long_unchecked_t vdata_alloc;
37098+ atomic_long_unchecked_t vdata_free;
37099+ atomic_long_unchecked_t gts_alloc;
37100+ atomic_long_unchecked_t gts_free;
37101+ atomic_long_unchecked_t vdata_double_alloc;
37102+ atomic_long_unchecked_t gts_double_allocate;
37103+ atomic_long_unchecked_t assign_context;
37104+ atomic_long_unchecked_t assign_context_failed;
37105+ atomic_long_unchecked_t free_context;
37106+ atomic_long_unchecked_t load_user_context;
37107+ atomic_long_unchecked_t load_kernel_context;
37108+ atomic_long_unchecked_t lock_kernel_context;
37109+ atomic_long_unchecked_t unlock_kernel_context;
37110+ atomic_long_unchecked_t steal_user_context;
37111+ atomic_long_unchecked_t steal_kernel_context;
37112+ atomic_long_unchecked_t steal_context_failed;
37113+ atomic_long_unchecked_t nopfn;
37114+ atomic_long_unchecked_t break_cow;
37115+ atomic_long_unchecked_t asid_new;
37116+ atomic_long_unchecked_t asid_next;
37117+ atomic_long_unchecked_t asid_wrap;
37118+ atomic_long_unchecked_t asid_reuse;
37119+ atomic_long_unchecked_t intr;
37120+ atomic_long_unchecked_t intr_mm_lock_failed;
37121+ atomic_long_unchecked_t call_os;
37122+ atomic_long_unchecked_t call_os_offnode_reference;
37123+ atomic_long_unchecked_t call_os_check_for_bug;
37124+ atomic_long_unchecked_t call_os_wait_queue;
37125+ atomic_long_unchecked_t user_flush_tlb;
37126+ atomic_long_unchecked_t user_unload_context;
37127+ atomic_long_unchecked_t user_exception;
37128+ atomic_long_unchecked_t set_context_option;
37129+ atomic_long_unchecked_t migrate_check;
37130+ atomic_long_unchecked_t migrated_retarget;
37131+ atomic_long_unchecked_t migrated_unload;
37132+ atomic_long_unchecked_t migrated_unload_delay;
37133+ atomic_long_unchecked_t migrated_nopfn_retarget;
37134+ atomic_long_unchecked_t migrated_nopfn_unload;
37135+ atomic_long_unchecked_t tlb_dropin;
37136+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37137+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37138+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37139+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37140+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37141+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37142+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37143+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37144+ atomic_long_unchecked_t tfh_stale_on_fault;
37145+ atomic_long_unchecked_t mmu_invalidate_range;
37146+ atomic_long_unchecked_t mmu_invalidate_page;
37147+ atomic_long_unchecked_t mmu_clear_flush_young;
37148+ atomic_long_unchecked_t flush_tlb;
37149+ atomic_long_unchecked_t flush_tlb_gru;
37150+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37151+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37152+
37153+ atomic_long_unchecked_t copy_gpa;
37154+
37155+ atomic_long_unchecked_t mesq_receive;
37156+ atomic_long_unchecked_t mesq_receive_none;
37157+ atomic_long_unchecked_t mesq_send;
37158+ atomic_long_unchecked_t mesq_send_failed;
37159+ atomic_long_unchecked_t mesq_noop;
37160+ atomic_long_unchecked_t mesq_send_unexpected_error;
37161+ atomic_long_unchecked_t mesq_send_lb_overflow;
37162+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37163+ atomic_long_unchecked_t mesq_send_amo_nacked;
37164+ atomic_long_unchecked_t mesq_send_put_nacked;
37165+ atomic_long_unchecked_t mesq_qf_not_full;
37166+ atomic_long_unchecked_t mesq_qf_locked;
37167+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37168+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37169+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37170+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37171+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37172+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37173+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37174+ atomic_long_unchecked_t mesq_noop_put_nacked;
37175
37176 };
37177
37178@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37179 cchop_deallocate, tghop_invalidate, mcsop_last};
37180
37181 struct mcs_op_statistic {
37182- atomic_long_t count;
37183- atomic_long_t total;
37184+ atomic_long_unchecked_t count;
37185+ atomic_long_unchecked_t total;
37186 unsigned long max;
37187 };
37188
37189@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37190
37191 #define STAT(id) do { \
37192 if (gru_options & OPT_STATS) \
37193- atomic_long_inc(&gru_stats.id); \
37194+ atomic_long_inc_unchecked(&gru_stats.id); \
37195 } while (0)
37196
37197 #ifdef CONFIG_SGI_GRU_DEBUG
37198diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37199index 2275126..12a9dbfb 100644
37200--- a/drivers/misc/sgi-xp/xp.h
37201+++ b/drivers/misc/sgi-xp/xp.h
37202@@ -289,7 +289,7 @@ struct xpc_interface {
37203 xpc_notify_func, void *);
37204 void (*received) (short, int, void *);
37205 enum xp_retval (*partid_to_nasids) (short, void *);
37206-};
37207+} __no_const;
37208
37209 extern struct xpc_interface xpc_interface;
37210
37211diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37212index b94d5f7..7f494c5 100644
37213--- a/drivers/misc/sgi-xp/xpc.h
37214+++ b/drivers/misc/sgi-xp/xpc.h
37215@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37216 void (*received_payload) (struct xpc_channel *, void *);
37217 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37218 };
37219+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37220
37221 /* struct xpc_partition act_state values (for XPC HB) */
37222
37223@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37224 /* found in xpc_main.c */
37225 extern struct device *xpc_part;
37226 extern struct device *xpc_chan;
37227-extern struct xpc_arch_operations xpc_arch_ops;
37228+extern xpc_arch_operations_no_const xpc_arch_ops;
37229 extern int xpc_disengage_timelimit;
37230 extern int xpc_disengage_timedout;
37231 extern int xpc_activate_IRQ_rcvd;
37232diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37233index fd3688a..7e211a4 100644
37234--- a/drivers/misc/sgi-xp/xpc_main.c
37235+++ b/drivers/misc/sgi-xp/xpc_main.c
37236@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37237 .notifier_call = xpc_system_die,
37238 };
37239
37240-struct xpc_arch_operations xpc_arch_ops;
37241+xpc_arch_operations_no_const xpc_arch_ops;
37242
37243 /*
37244 * Timer function to enforce the timelimit on the partition disengage.
37245diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37246index 8b70e03..700bda6 100644
37247--- a/drivers/misc/sgi-xp/xpc_sn2.c
37248+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37249@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37250 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37251 }
37252
37253-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37254+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37255 .setup_partitions = xpc_setup_partitions_sn2,
37256 .teardown_partitions = xpc_teardown_partitions_sn2,
37257 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37258@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37259 int ret;
37260 size_t buf_size;
37261
37262- xpc_arch_ops = xpc_arch_ops_sn2;
37263+ pax_open_kernel();
37264+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37265+ pax_close_kernel();
37266
37267 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37268 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37269diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37270index 8e08d71..7cb8c9b 100644
37271--- a/drivers/misc/sgi-xp/xpc_uv.c
37272+++ b/drivers/misc/sgi-xp/xpc_uv.c
37273@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37274 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37275 }
37276
37277-static struct xpc_arch_operations xpc_arch_ops_uv = {
37278+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37279 .setup_partitions = xpc_setup_partitions_uv,
37280 .teardown_partitions = xpc_teardown_partitions_uv,
37281 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37282@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37283 int
37284 xpc_init_uv(void)
37285 {
37286- xpc_arch_ops = xpc_arch_ops_uv;
37287+ pax_open_kernel();
37288+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37289+ pax_close_kernel();
37290
37291 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37292 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37293diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37294index 6fd20b42..650efe3 100644
37295--- a/drivers/mmc/host/sdhci-pci.c
37296+++ b/drivers/mmc/host/sdhci-pci.c
37297@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37298 .probe = via_probe,
37299 };
37300
37301-static const struct pci_device_id pci_ids[] __devinitdata = {
37302+static const struct pci_device_id pci_ids[] __devinitconst = {
37303 {
37304 .vendor = PCI_VENDOR_ID_RICOH,
37305 .device = PCI_DEVICE_ID_RICOH_R5C822,
37306diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37307index e7563a9..5f90ce5 100644
37308--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37309+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37310@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37311 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37312 unsigned long timeo = jiffies + HZ;
37313
37314+ pax_track_stack();
37315+
37316 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37317 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37318 goto sleep;
37319@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37320 unsigned long initial_adr;
37321 int initial_len = len;
37322
37323+ pax_track_stack();
37324+
37325 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37326 adr += chip->start;
37327 initial_adr = adr;
37328@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37329 int retries = 3;
37330 int ret;
37331
37332+ pax_track_stack();
37333+
37334 adr += chip->start;
37335
37336 retry:
37337diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37338index 0667a67..3ab97ed 100644
37339--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37340+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37341@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37342 unsigned long cmd_addr;
37343 struct cfi_private *cfi = map->fldrv_priv;
37344
37345+ pax_track_stack();
37346+
37347 adr += chip->start;
37348
37349 /* Ensure cmd read/writes are aligned. */
37350@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37351 DECLARE_WAITQUEUE(wait, current);
37352 int wbufsize, z;
37353
37354+ pax_track_stack();
37355+
37356 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37357 if (adr & (map_bankwidth(map)-1))
37358 return -EINVAL;
37359@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37360 DECLARE_WAITQUEUE(wait, current);
37361 int ret = 0;
37362
37363+ pax_track_stack();
37364+
37365 adr += chip->start;
37366
37367 /* Let's determine this according to the interleave only once */
37368@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37369 unsigned long timeo = jiffies + HZ;
37370 DECLARE_WAITQUEUE(wait, current);
37371
37372+ pax_track_stack();
37373+
37374 adr += chip->start;
37375
37376 /* Let's determine this according to the interleave only once */
37377@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37378 unsigned long timeo = jiffies + HZ;
37379 DECLARE_WAITQUEUE(wait, current);
37380
37381+ pax_track_stack();
37382+
37383 adr += chip->start;
37384
37385 /* Let's determine this according to the interleave only once */
37386diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37387index 5bf5f46..c5de373 100644
37388--- a/drivers/mtd/devices/doc2000.c
37389+++ b/drivers/mtd/devices/doc2000.c
37390@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37391
37392 /* The ECC will not be calculated correctly if less than 512 is written */
37393 /* DBB-
37394- if (len != 0x200 && eccbuf)
37395+ if (len != 0x200)
37396 printk(KERN_WARNING
37397 "ECC needs a full sector write (adr: %lx size %lx)\n",
37398 (long) to, (long) len);
37399diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37400index 0990f78..bb4e8a4 100644
37401--- a/drivers/mtd/devices/doc2001.c
37402+++ b/drivers/mtd/devices/doc2001.c
37403@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37404 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37405
37406 /* Don't allow read past end of device */
37407- if (from >= this->totlen)
37408+ if (from >= this->totlen || !len)
37409 return -EINVAL;
37410
37411 /* Don't allow a single read to cross a 512-byte block boundary */
37412diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37413index e56d6b4..f07e6cf 100644
37414--- a/drivers/mtd/ftl.c
37415+++ b/drivers/mtd/ftl.c
37416@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37417 loff_t offset;
37418 uint16_t srcunitswap = cpu_to_le16(srcunit);
37419
37420+ pax_track_stack();
37421+
37422 eun = &part->EUNInfo[srcunit];
37423 xfer = &part->XferInfo[xferunit];
37424 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37425diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37426index 8aca552..146446e 100755
37427--- a/drivers/mtd/inftlcore.c
37428+++ b/drivers/mtd/inftlcore.c
37429@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37430 struct inftl_oob oob;
37431 size_t retlen;
37432
37433+ pax_track_stack();
37434+
37435 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37436 "pending=%d)\n", inftl, thisVUC, pendingblock);
37437
37438diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37439index 32e82ae..ed50953 100644
37440--- a/drivers/mtd/inftlmount.c
37441+++ b/drivers/mtd/inftlmount.c
37442@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37443 struct INFTLPartition *ip;
37444 size_t retlen;
37445
37446+ pax_track_stack();
37447+
37448 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37449
37450 /*
37451diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37452index 79bf40f..fe5f8fd 100644
37453--- a/drivers/mtd/lpddr/qinfo_probe.c
37454+++ b/drivers/mtd/lpddr/qinfo_probe.c
37455@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37456 {
37457 map_word pfow_val[4];
37458
37459+ pax_track_stack();
37460+
37461 /* Check identification string */
37462 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37463 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37464diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37465index 726a1b8..f46b460 100644
37466--- a/drivers/mtd/mtdchar.c
37467+++ b/drivers/mtd/mtdchar.c
37468@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37469 u_long size;
37470 struct mtd_info_user info;
37471
37472+ pax_track_stack();
37473+
37474 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37475
37476 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37477diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37478index 1002e18..26d82d5 100644
37479--- a/drivers/mtd/nftlcore.c
37480+++ b/drivers/mtd/nftlcore.c
37481@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37482 int inplace = 1;
37483 size_t retlen;
37484
37485+ pax_track_stack();
37486+
37487 memset(BlockMap, 0xff, sizeof(BlockMap));
37488 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37489
37490diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37491index 8b22b18..6fada85 100644
37492--- a/drivers/mtd/nftlmount.c
37493+++ b/drivers/mtd/nftlmount.c
37494@@ -23,6 +23,7 @@
37495 #include <asm/errno.h>
37496 #include <linux/delay.h>
37497 #include <linux/slab.h>
37498+#include <linux/sched.h>
37499 #include <linux/mtd/mtd.h>
37500 #include <linux/mtd/nand.h>
37501 #include <linux/mtd/nftl.h>
37502@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37503 struct mtd_info *mtd = nftl->mbd.mtd;
37504 unsigned int i;
37505
37506+ pax_track_stack();
37507+
37508 /* Assume logical EraseSize == physical erasesize for starting the scan.
37509 We'll sort it out later if we find a MediaHeader which says otherwise */
37510 /* Actually, we won't. The new DiskOnChip driver has already scanned
37511diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37512index 14cec04..d775b87 100644
37513--- a/drivers/mtd/ubi/build.c
37514+++ b/drivers/mtd/ubi/build.c
37515@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37516 static int __init bytes_str_to_int(const char *str)
37517 {
37518 char *endp;
37519- unsigned long result;
37520+ unsigned long result, scale = 1;
37521
37522 result = simple_strtoul(str, &endp, 0);
37523 if (str == endp || result >= INT_MAX) {
37524@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37525
37526 switch (*endp) {
37527 case 'G':
37528- result *= 1024;
37529+ scale *= 1024;
37530 case 'M':
37531- result *= 1024;
37532+ scale *= 1024;
37533 case 'K':
37534- result *= 1024;
37535+ scale *= 1024;
37536 if (endp[1] == 'i' && endp[2] == 'B')
37537 endp += 2;
37538 case '\0':
37539@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37540 return -EINVAL;
37541 }
37542
37543- return result;
37544+ if ((intoverflow_t)result*scale >= INT_MAX) {
37545+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37546+ str);
37547+ return -EINVAL;
37548+ }
37549+
37550+ return result*scale;
37551 }
37552
37553 /**
37554diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37555index ab68886..ca405e8 100644
37556--- a/drivers/net/atlx/atl2.c
37557+++ b/drivers/net/atlx/atl2.c
37558@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37559 */
37560
37561 #define ATL2_PARAM(X, desc) \
37562- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37563+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37564 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37565 MODULE_PARM_DESC(X, desc);
37566 #else
37567diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37568index 4874b2b..67f8526 100644
37569--- a/drivers/net/bnx2.c
37570+++ b/drivers/net/bnx2.c
37571@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37572 int rc = 0;
37573 u32 magic, csum;
37574
37575+ pax_track_stack();
37576+
37577 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37578 goto test_nvram_done;
37579
37580diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37581index fd3eb07..8a6978d 100644
37582--- a/drivers/net/cxgb3/l2t.h
37583+++ b/drivers/net/cxgb3/l2t.h
37584@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37585 */
37586 struct l2t_skb_cb {
37587 arp_failure_handler_func arp_failure_handler;
37588-};
37589+} __no_const;
37590
37591 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37592
37593diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37594index 032cfe0..411af379 100644
37595--- a/drivers/net/cxgb3/t3_hw.c
37596+++ b/drivers/net/cxgb3/t3_hw.c
37597@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37598 int i, addr, ret;
37599 struct t3_vpd vpd;
37600
37601+ pax_track_stack();
37602+
37603 /*
37604 * Card information is normally at VPD_BASE but some early cards had
37605 * it at 0.
37606diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37607index d1e0563..b9e129c 100644
37608--- a/drivers/net/e1000e/82571.c
37609+++ b/drivers/net/e1000e/82571.c
37610@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37611 {
37612 struct e1000_hw *hw = &adapter->hw;
37613 struct e1000_mac_info *mac = &hw->mac;
37614- struct e1000_mac_operations *func = &mac->ops;
37615+ e1000_mac_operations_no_const *func = &mac->ops;
37616 u32 swsm = 0;
37617 u32 swsm2 = 0;
37618 bool force_clear_smbi = false;
37619@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37620 temp = er32(ICRXDMTC);
37621 }
37622
37623-static struct e1000_mac_operations e82571_mac_ops = {
37624+static const struct e1000_mac_operations e82571_mac_ops = {
37625 /* .check_mng_mode: mac type dependent */
37626 /* .check_for_link: media type dependent */
37627 .id_led_init = e1000e_id_led_init,
37628@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37629 .setup_led = e1000e_setup_led_generic,
37630 };
37631
37632-static struct e1000_phy_operations e82_phy_ops_igp = {
37633+static const struct e1000_phy_operations e82_phy_ops_igp = {
37634 .acquire_phy = e1000_get_hw_semaphore_82571,
37635 .check_reset_block = e1000e_check_reset_block_generic,
37636 .commit_phy = NULL,
37637@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37638 .cfg_on_link_up = NULL,
37639 };
37640
37641-static struct e1000_phy_operations e82_phy_ops_m88 = {
37642+static const struct e1000_phy_operations e82_phy_ops_m88 = {
37643 .acquire_phy = e1000_get_hw_semaphore_82571,
37644 .check_reset_block = e1000e_check_reset_block_generic,
37645 .commit_phy = e1000e_phy_sw_reset,
37646@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
37647 .cfg_on_link_up = NULL,
37648 };
37649
37650-static struct e1000_phy_operations e82_phy_ops_bm = {
37651+static const struct e1000_phy_operations e82_phy_ops_bm = {
37652 .acquire_phy = e1000_get_hw_semaphore_82571,
37653 .check_reset_block = e1000e_check_reset_block_generic,
37654 .commit_phy = e1000e_phy_sw_reset,
37655@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
37656 .cfg_on_link_up = NULL,
37657 };
37658
37659-static struct e1000_nvm_operations e82571_nvm_ops = {
37660+static const struct e1000_nvm_operations e82571_nvm_ops = {
37661 .acquire_nvm = e1000_acquire_nvm_82571,
37662 .read_nvm = e1000e_read_nvm_eerd,
37663 .release_nvm = e1000_release_nvm_82571,
37664diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
37665index 47db9bd..fa58ccd 100644
37666--- a/drivers/net/e1000e/e1000.h
37667+++ b/drivers/net/e1000e/e1000.h
37668@@ -375,9 +375,9 @@ struct e1000_info {
37669 u32 pba;
37670 u32 max_hw_frame_size;
37671 s32 (*get_variants)(struct e1000_adapter *);
37672- struct e1000_mac_operations *mac_ops;
37673- struct e1000_phy_operations *phy_ops;
37674- struct e1000_nvm_operations *nvm_ops;
37675+ const struct e1000_mac_operations *mac_ops;
37676+ const struct e1000_phy_operations *phy_ops;
37677+ const struct e1000_nvm_operations *nvm_ops;
37678 };
37679
37680 /* hardware capability, feature, and workaround flags */
37681diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
37682index ae5d736..e9a93a1 100644
37683--- a/drivers/net/e1000e/es2lan.c
37684+++ b/drivers/net/e1000e/es2lan.c
37685@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
37686 {
37687 struct e1000_hw *hw = &adapter->hw;
37688 struct e1000_mac_info *mac = &hw->mac;
37689- struct e1000_mac_operations *func = &mac->ops;
37690+ e1000_mac_operations_no_const *func = &mac->ops;
37691
37692 /* Set media type */
37693 switch (adapter->pdev->device) {
37694@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
37695 temp = er32(ICRXDMTC);
37696 }
37697
37698-static struct e1000_mac_operations es2_mac_ops = {
37699+static const struct e1000_mac_operations es2_mac_ops = {
37700 .id_led_init = e1000e_id_led_init,
37701 .check_mng_mode = e1000e_check_mng_mode_generic,
37702 /* check_for_link dependent on media type */
37703@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
37704 .setup_led = e1000e_setup_led_generic,
37705 };
37706
37707-static struct e1000_phy_operations es2_phy_ops = {
37708+static const struct e1000_phy_operations es2_phy_ops = {
37709 .acquire_phy = e1000_acquire_phy_80003es2lan,
37710 .check_reset_block = e1000e_check_reset_block_generic,
37711 .commit_phy = e1000e_phy_sw_reset,
37712@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
37713 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
37714 };
37715
37716-static struct e1000_nvm_operations es2_nvm_ops = {
37717+static const struct e1000_nvm_operations es2_nvm_ops = {
37718 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
37719 .read_nvm = e1000e_read_nvm_eerd,
37720 .release_nvm = e1000_release_nvm_80003es2lan,
37721diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
37722index 11f3b7c..6381887 100644
37723--- a/drivers/net/e1000e/hw.h
37724+++ b/drivers/net/e1000e/hw.h
37725@@ -753,6 +753,7 @@ struct e1000_mac_operations {
37726 s32 (*setup_physical_interface)(struct e1000_hw *);
37727 s32 (*setup_led)(struct e1000_hw *);
37728 };
37729+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37730
37731 /* Function pointers for the PHY. */
37732 struct e1000_phy_operations {
37733@@ -774,6 +775,7 @@ struct e1000_phy_operations {
37734 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
37735 s32 (*cfg_on_link_up)(struct e1000_hw *);
37736 };
37737+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37738
37739 /* Function pointers for the NVM. */
37740 struct e1000_nvm_operations {
37741@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
37742 s32 (*validate_nvm)(struct e1000_hw *);
37743 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
37744 };
37745+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37746
37747 struct e1000_mac_info {
37748- struct e1000_mac_operations ops;
37749+ e1000_mac_operations_no_const ops;
37750
37751 u8 addr[6];
37752 u8 perm_addr[6];
37753@@ -823,7 +826,7 @@ struct e1000_mac_info {
37754 };
37755
37756 struct e1000_phy_info {
37757- struct e1000_phy_operations ops;
37758+ e1000_phy_operations_no_const ops;
37759
37760 enum e1000_phy_type type;
37761
37762@@ -857,7 +860,7 @@ struct e1000_phy_info {
37763 };
37764
37765 struct e1000_nvm_info {
37766- struct e1000_nvm_operations ops;
37767+ e1000_nvm_operations_no_const ops;
37768
37769 enum e1000_nvm_type type;
37770 enum e1000_nvm_override override;
37771diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
37772index de39f9a..e28d3e0 100644
37773--- a/drivers/net/e1000e/ich8lan.c
37774+++ b/drivers/net/e1000e/ich8lan.c
37775@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
37776 }
37777 }
37778
37779-static struct e1000_mac_operations ich8_mac_ops = {
37780+static const struct e1000_mac_operations ich8_mac_ops = {
37781 .id_led_init = e1000e_id_led_init,
37782 .check_mng_mode = e1000_check_mng_mode_ich8lan,
37783 .check_for_link = e1000_check_for_copper_link_ich8lan,
37784@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
37785 /* id_led_init dependent on mac type */
37786 };
37787
37788-static struct e1000_phy_operations ich8_phy_ops = {
37789+static const struct e1000_phy_operations ich8_phy_ops = {
37790 .acquire_phy = e1000_acquire_swflag_ich8lan,
37791 .check_reset_block = e1000_check_reset_block_ich8lan,
37792 .commit_phy = NULL,
37793@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
37794 .write_phy_reg = e1000e_write_phy_reg_igp,
37795 };
37796
37797-static struct e1000_nvm_operations ich8_nvm_ops = {
37798+static const struct e1000_nvm_operations ich8_nvm_ops = {
37799 .acquire_nvm = e1000_acquire_nvm_ich8lan,
37800 .read_nvm = e1000_read_nvm_ich8lan,
37801 .release_nvm = e1000_release_nvm_ich8lan,
37802diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
37803index 18d5fbb..542d96d 100644
37804--- a/drivers/net/fealnx.c
37805+++ b/drivers/net/fealnx.c
37806@@ -151,7 +151,7 @@ struct chip_info {
37807 int flags;
37808 };
37809
37810-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
37811+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
37812 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37813 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
37814 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37815diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
37816index 0e5b54b..b503f82 100644
37817--- a/drivers/net/hamradio/6pack.c
37818+++ b/drivers/net/hamradio/6pack.c
37819@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
37820 unsigned char buf[512];
37821 int count1;
37822
37823+ pax_track_stack();
37824+
37825 if (!count)
37826 return;
37827
37828diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
37829index 5862282..7cce8cb 100644
37830--- a/drivers/net/ibmveth.c
37831+++ b/drivers/net/ibmveth.c
37832@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
37833 NULL,
37834 };
37835
37836-static struct sysfs_ops veth_pool_ops = {
37837+static const struct sysfs_ops veth_pool_ops = {
37838 .show = veth_pool_show,
37839 .store = veth_pool_store,
37840 };
37841diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
37842index d617f2d..57b5309 100644
37843--- a/drivers/net/igb/e1000_82575.c
37844+++ b/drivers/net/igb/e1000_82575.c
37845@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
37846 wr32(E1000_VT_CTL, vt_ctl);
37847 }
37848
37849-static struct e1000_mac_operations e1000_mac_ops_82575 = {
37850+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
37851 .reset_hw = igb_reset_hw_82575,
37852 .init_hw = igb_init_hw_82575,
37853 .check_for_link = igb_check_for_link_82575,
37854@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
37855 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
37856 };
37857
37858-static struct e1000_phy_operations e1000_phy_ops_82575 = {
37859+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
37860 .acquire = igb_acquire_phy_82575,
37861 .get_cfg_done = igb_get_cfg_done_82575,
37862 .release = igb_release_phy_82575,
37863 };
37864
37865-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37866+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37867 .acquire = igb_acquire_nvm_82575,
37868 .read = igb_read_nvm_eerd,
37869 .release = igb_release_nvm_82575,
37870diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
37871index 72081df..d855cf5 100644
37872--- a/drivers/net/igb/e1000_hw.h
37873+++ b/drivers/net/igb/e1000_hw.h
37874@@ -288,6 +288,7 @@ struct e1000_mac_operations {
37875 s32 (*read_mac_addr)(struct e1000_hw *);
37876 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
37877 };
37878+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37879
37880 struct e1000_phy_operations {
37881 s32 (*acquire)(struct e1000_hw *);
37882@@ -303,6 +304,7 @@ struct e1000_phy_operations {
37883 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
37884 s32 (*write_reg)(struct e1000_hw *, u32, u16);
37885 };
37886+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37887
37888 struct e1000_nvm_operations {
37889 s32 (*acquire)(struct e1000_hw *);
37890@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
37891 void (*release)(struct e1000_hw *);
37892 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
37893 };
37894+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37895
37896 struct e1000_info {
37897 s32 (*get_invariants)(struct e1000_hw *);
37898@@ -321,7 +324,7 @@ struct e1000_info {
37899 extern const struct e1000_info e1000_82575_info;
37900
37901 struct e1000_mac_info {
37902- struct e1000_mac_operations ops;
37903+ e1000_mac_operations_no_const ops;
37904
37905 u8 addr[6];
37906 u8 perm_addr[6];
37907@@ -365,7 +368,7 @@ struct e1000_mac_info {
37908 };
37909
37910 struct e1000_phy_info {
37911- struct e1000_phy_operations ops;
37912+ e1000_phy_operations_no_const ops;
37913
37914 enum e1000_phy_type type;
37915
37916@@ -400,7 +403,7 @@ struct e1000_phy_info {
37917 };
37918
37919 struct e1000_nvm_info {
37920- struct e1000_nvm_operations ops;
37921+ e1000_nvm_operations_no_const ops;
37922
37923 enum e1000_nvm_type type;
37924 enum e1000_nvm_override override;
37925@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
37926 s32 (*check_for_ack)(struct e1000_hw *, u16);
37927 s32 (*check_for_rst)(struct e1000_hw *, u16);
37928 };
37929+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
37930
37931 struct e1000_mbx_stats {
37932 u32 msgs_tx;
37933@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
37934 };
37935
37936 struct e1000_mbx_info {
37937- struct e1000_mbx_operations ops;
37938+ e1000_mbx_operations_no_const ops;
37939 struct e1000_mbx_stats stats;
37940 u32 timeout;
37941 u32 usec_delay;
37942diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
37943index 1e8ce37..549c453 100644
37944--- a/drivers/net/igbvf/vf.h
37945+++ b/drivers/net/igbvf/vf.h
37946@@ -187,9 +187,10 @@ struct e1000_mac_operations {
37947 s32 (*read_mac_addr)(struct e1000_hw *);
37948 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
37949 };
37950+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37951
37952 struct e1000_mac_info {
37953- struct e1000_mac_operations ops;
37954+ e1000_mac_operations_no_const ops;
37955 u8 addr[6];
37956 u8 perm_addr[6];
37957
37958@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
37959 s32 (*check_for_ack)(struct e1000_hw *);
37960 s32 (*check_for_rst)(struct e1000_hw *);
37961 };
37962+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
37963
37964 struct e1000_mbx_stats {
37965 u32 msgs_tx;
37966@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
37967 };
37968
37969 struct e1000_mbx_info {
37970- struct e1000_mbx_operations ops;
37971+ e1000_mbx_operations_no_const ops;
37972 struct e1000_mbx_stats stats;
37973 u32 timeout;
37974 u32 usec_delay;
37975diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
37976index aa7286b..a61394f 100644
37977--- a/drivers/net/iseries_veth.c
37978+++ b/drivers/net/iseries_veth.c
37979@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
37980 NULL
37981 };
37982
37983-static struct sysfs_ops veth_cnx_sysfs_ops = {
37984+static const struct sysfs_ops veth_cnx_sysfs_ops = {
37985 .show = veth_cnx_attribute_show
37986 };
37987
37988@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
37989 NULL
37990 };
37991
37992-static struct sysfs_ops veth_port_sysfs_ops = {
37993+static const struct sysfs_ops veth_port_sysfs_ops = {
37994 .show = veth_port_attribute_show
37995 };
37996
37997diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
37998index 8aa44dc..fa1e797 100644
37999--- a/drivers/net/ixgb/ixgb_main.c
38000+++ b/drivers/net/ixgb/ixgb_main.c
38001@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38002 u32 rctl;
38003 int i;
38004
38005+ pax_track_stack();
38006+
38007 /* Check for Promiscuous and All Multicast modes */
38008
38009 rctl = IXGB_READ_REG(hw, RCTL);
38010diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38011index af35e1d..8781785 100644
38012--- a/drivers/net/ixgb/ixgb_param.c
38013+++ b/drivers/net/ixgb/ixgb_param.c
38014@@ -260,6 +260,9 @@ void __devinit
38015 ixgb_check_options(struct ixgb_adapter *adapter)
38016 {
38017 int bd = adapter->bd_number;
38018+
38019+ pax_track_stack();
38020+
38021 if (bd >= IXGB_MAX_NIC) {
38022 printk(KERN_NOTICE
38023 "Warning: no configuration for board #%i\n", bd);
38024diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38025index b17aa73..ed74540 100644
38026--- a/drivers/net/ixgbe/ixgbe_type.h
38027+++ b/drivers/net/ixgbe/ixgbe_type.h
38028@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38029 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38030 s32 (*update_checksum)(struct ixgbe_hw *);
38031 };
38032+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38033
38034 struct ixgbe_mac_operations {
38035 s32 (*init_hw)(struct ixgbe_hw *);
38036@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38037 /* Flow Control */
38038 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38039 };
38040+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38041
38042 struct ixgbe_phy_operations {
38043 s32 (*identify)(struct ixgbe_hw *);
38044@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38045 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38046 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38047 };
38048+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38049
38050 struct ixgbe_eeprom_info {
38051- struct ixgbe_eeprom_operations ops;
38052+ ixgbe_eeprom_operations_no_const ops;
38053 enum ixgbe_eeprom_type type;
38054 u32 semaphore_delay;
38055 u16 word_size;
38056@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38057 };
38058
38059 struct ixgbe_mac_info {
38060- struct ixgbe_mac_operations ops;
38061+ ixgbe_mac_operations_no_const ops;
38062 enum ixgbe_mac_type type;
38063 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38064 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38065@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38066 };
38067
38068 struct ixgbe_phy_info {
38069- struct ixgbe_phy_operations ops;
38070+ ixgbe_phy_operations_no_const ops;
38071 struct mdio_if_info mdio;
38072 enum ixgbe_phy_type type;
38073 u32 id;
38074diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38075index 291a505..2543756 100644
38076--- a/drivers/net/mlx4/main.c
38077+++ b/drivers/net/mlx4/main.c
38078@@ -38,6 +38,7 @@
38079 #include <linux/errno.h>
38080 #include <linux/pci.h>
38081 #include <linux/dma-mapping.h>
38082+#include <linux/sched.h>
38083
38084 #include <linux/mlx4/device.h>
38085 #include <linux/mlx4/doorbell.h>
38086@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38087 u64 icm_size;
38088 int err;
38089
38090+ pax_track_stack();
38091+
38092 err = mlx4_QUERY_FW(dev);
38093 if (err) {
38094 if (err == -EACCES)
38095diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38096index 2dce134..fa5ce75 100644
38097--- a/drivers/net/niu.c
38098+++ b/drivers/net/niu.c
38099@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38100 int i, num_irqs, err;
38101 u8 first_ldg;
38102
38103+ pax_track_stack();
38104+
38105 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38106 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38107 ldg_num_map[i] = first_ldg + i;
38108diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38109index c1b3f09..97cd8c4 100644
38110--- a/drivers/net/pcnet32.c
38111+++ b/drivers/net/pcnet32.c
38112@@ -79,7 +79,7 @@ static int cards_found;
38113 /*
38114 * VLB I/O addresses
38115 */
38116-static unsigned int pcnet32_portlist[] __initdata =
38117+static unsigned int pcnet32_portlist[] __devinitdata =
38118 { 0x300, 0x320, 0x340, 0x360, 0 };
38119
38120 static int pcnet32_debug = 0;
38121@@ -267,7 +267,7 @@ struct pcnet32_private {
38122 struct sk_buff **rx_skbuff;
38123 dma_addr_t *tx_dma_addr;
38124 dma_addr_t *rx_dma_addr;
38125- struct pcnet32_access a;
38126+ struct pcnet32_access *a;
38127 spinlock_t lock; /* Guard lock */
38128 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38129 unsigned int rx_ring_size; /* current rx ring size */
38130@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38131 u16 val;
38132
38133 netif_wake_queue(dev);
38134- val = lp->a.read_csr(ioaddr, CSR3);
38135+ val = lp->a->read_csr(ioaddr, CSR3);
38136 val &= 0x00ff;
38137- lp->a.write_csr(ioaddr, CSR3, val);
38138+ lp->a->write_csr(ioaddr, CSR3, val);
38139 napi_enable(&lp->napi);
38140 }
38141
38142@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38143 r = mii_link_ok(&lp->mii_if);
38144 } else if (lp->chip_version >= PCNET32_79C970A) {
38145 ulong ioaddr = dev->base_addr; /* card base I/O address */
38146- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38147+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38148 } else { /* can not detect link on really old chips */
38149 r = 1;
38150 }
38151@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38152 pcnet32_netif_stop(dev);
38153
38154 spin_lock_irqsave(&lp->lock, flags);
38155- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38156+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38157
38158 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38159
38160@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38161 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38162 {
38163 struct pcnet32_private *lp = netdev_priv(dev);
38164- struct pcnet32_access *a = &lp->a; /* access to registers */
38165+ struct pcnet32_access *a = lp->a; /* access to registers */
38166 ulong ioaddr = dev->base_addr; /* card base I/O address */
38167 struct sk_buff *skb; /* sk buff */
38168 int x, i; /* counters */
38169@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38170 pcnet32_netif_stop(dev);
38171
38172 spin_lock_irqsave(&lp->lock, flags);
38173- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38174+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38175
38176 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38177
38178 /* Reset the PCNET32 */
38179- lp->a.reset(ioaddr);
38180- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38181+ lp->a->reset(ioaddr);
38182+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38183
38184 /* switch pcnet32 to 32bit mode */
38185- lp->a.write_bcr(ioaddr, 20, 2);
38186+ lp->a->write_bcr(ioaddr, 20, 2);
38187
38188 /* purge & init rings but don't actually restart */
38189 pcnet32_restart(dev, 0x0000);
38190
38191- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38192+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38193
38194 /* Initialize Transmit buffers. */
38195 size = data_len + 15;
38196@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38197
38198 /* set int loopback in CSR15 */
38199 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38200- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38201+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38202
38203 teststatus = cpu_to_le16(0x8000);
38204- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38205+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38206
38207 /* Check status of descriptors */
38208 for (x = 0; x < numbuffs; x++) {
38209@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38210 }
38211 }
38212
38213- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38214+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38215 wmb();
38216 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38217 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38218@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38219 pcnet32_restart(dev, CSR0_NORMAL);
38220 } else {
38221 pcnet32_purge_rx_ring(dev);
38222- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38223+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38224 }
38225 spin_unlock_irqrestore(&lp->lock, flags);
38226
38227@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38228 static void pcnet32_led_blink_callback(struct net_device *dev)
38229 {
38230 struct pcnet32_private *lp = netdev_priv(dev);
38231- struct pcnet32_access *a = &lp->a;
38232+ struct pcnet32_access *a = lp->a;
38233 ulong ioaddr = dev->base_addr;
38234 unsigned long flags;
38235 int i;
38236@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38237 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38238 {
38239 struct pcnet32_private *lp = netdev_priv(dev);
38240- struct pcnet32_access *a = &lp->a;
38241+ struct pcnet32_access *a = lp->a;
38242 ulong ioaddr = dev->base_addr;
38243 unsigned long flags;
38244 int i, regs[4];
38245@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38246 {
38247 int csr5;
38248 struct pcnet32_private *lp = netdev_priv(dev);
38249- struct pcnet32_access *a = &lp->a;
38250+ struct pcnet32_access *a = lp->a;
38251 ulong ioaddr = dev->base_addr;
38252 int ticks;
38253
38254@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38255 spin_lock_irqsave(&lp->lock, flags);
38256 if (pcnet32_tx(dev)) {
38257 /* reset the chip to clear the error condition, then restart */
38258- lp->a.reset(ioaddr);
38259- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38260+ lp->a->reset(ioaddr);
38261+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38262 pcnet32_restart(dev, CSR0_START);
38263 netif_wake_queue(dev);
38264 }
38265@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38266 __napi_complete(napi);
38267
38268 /* clear interrupt masks */
38269- val = lp->a.read_csr(ioaddr, CSR3);
38270+ val = lp->a->read_csr(ioaddr, CSR3);
38271 val &= 0x00ff;
38272- lp->a.write_csr(ioaddr, CSR3, val);
38273+ lp->a->write_csr(ioaddr, CSR3, val);
38274
38275 /* Set interrupt enable. */
38276- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38277+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38278
38279 spin_unlock_irqrestore(&lp->lock, flags);
38280 }
38281@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38282 int i, csr0;
38283 u16 *buff = ptr;
38284 struct pcnet32_private *lp = netdev_priv(dev);
38285- struct pcnet32_access *a = &lp->a;
38286+ struct pcnet32_access *a = lp->a;
38287 ulong ioaddr = dev->base_addr;
38288 unsigned long flags;
38289
38290@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38291 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38292 if (lp->phymask & (1 << j)) {
38293 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38294- lp->a.write_bcr(ioaddr, 33,
38295+ lp->a->write_bcr(ioaddr, 33,
38296 (j << 5) | i);
38297- *buff++ = lp->a.read_bcr(ioaddr, 34);
38298+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38299 }
38300 }
38301 }
38302@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38303 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38304 lp->options |= PCNET32_PORT_FD;
38305
38306- lp->a = *a;
38307+ lp->a = a;
38308
38309 /* prior to register_netdev, dev->name is not yet correct */
38310 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38311@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38312 if (lp->mii) {
38313 /* lp->phycount and lp->phymask are set to 0 by memset above */
38314
38315- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38316+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38317 /* scan for PHYs */
38318 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38319 unsigned short id1, id2;
38320@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38321 "Found PHY %04x:%04x at address %d.\n",
38322 id1, id2, i);
38323 }
38324- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38325+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38326 if (lp->phycount > 1) {
38327 lp->options |= PCNET32_PORT_MII;
38328 }
38329@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38330 }
38331
38332 /* Reset the PCNET32 */
38333- lp->a.reset(ioaddr);
38334+ lp->a->reset(ioaddr);
38335
38336 /* switch pcnet32 to 32bit mode */
38337- lp->a.write_bcr(ioaddr, 20, 2);
38338+ lp->a->write_bcr(ioaddr, 20, 2);
38339
38340 if (netif_msg_ifup(lp))
38341 printk(KERN_DEBUG
38342@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38343 (u32) (lp->init_dma_addr));
38344
38345 /* set/reset autoselect bit */
38346- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38347+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38348 if (lp->options & PCNET32_PORT_ASEL)
38349 val |= 2;
38350- lp->a.write_bcr(ioaddr, 2, val);
38351+ lp->a->write_bcr(ioaddr, 2, val);
38352
38353 /* handle full duplex setting */
38354 if (lp->mii_if.full_duplex) {
38355- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38356+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38357 if (lp->options & PCNET32_PORT_FD) {
38358 val |= 1;
38359 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38360@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38361 if (lp->chip_version == 0x2627)
38362 val |= 3;
38363 }
38364- lp->a.write_bcr(ioaddr, 9, val);
38365+ lp->a->write_bcr(ioaddr, 9, val);
38366 }
38367
38368 /* set/reset GPSI bit in test register */
38369- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38370+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38371 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38372 val |= 0x10;
38373- lp->a.write_csr(ioaddr, 124, val);
38374+ lp->a->write_csr(ioaddr, 124, val);
38375
38376 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38377 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38378@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38379 * duplex, and/or enable auto negotiation, and clear DANAS
38380 */
38381 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38382- lp->a.write_bcr(ioaddr, 32,
38383- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38384+ lp->a->write_bcr(ioaddr, 32,
38385+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38386 /* disable Auto Negotiation, set 10Mpbs, HD */
38387- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38388+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38389 if (lp->options & PCNET32_PORT_FD)
38390 val |= 0x10;
38391 if (lp->options & PCNET32_PORT_100)
38392 val |= 0x08;
38393- lp->a.write_bcr(ioaddr, 32, val);
38394+ lp->a->write_bcr(ioaddr, 32, val);
38395 } else {
38396 if (lp->options & PCNET32_PORT_ASEL) {
38397- lp->a.write_bcr(ioaddr, 32,
38398- lp->a.read_bcr(ioaddr,
38399+ lp->a->write_bcr(ioaddr, 32,
38400+ lp->a->read_bcr(ioaddr,
38401 32) | 0x0080);
38402 /* enable auto negotiate, setup, disable fd */
38403- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38404+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38405 val |= 0x20;
38406- lp->a.write_bcr(ioaddr, 32, val);
38407+ lp->a->write_bcr(ioaddr, 32, val);
38408 }
38409 }
38410 } else {
38411@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38412 * There is really no good other way to handle multiple PHYs
38413 * other than turning off all automatics
38414 */
38415- val = lp->a.read_bcr(ioaddr, 2);
38416- lp->a.write_bcr(ioaddr, 2, val & ~2);
38417- val = lp->a.read_bcr(ioaddr, 32);
38418- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38419+ val = lp->a->read_bcr(ioaddr, 2);
38420+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38421+ val = lp->a->read_bcr(ioaddr, 32);
38422+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38423
38424 if (!(lp->options & PCNET32_PORT_ASEL)) {
38425 /* setup ecmd */
38426@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38427 ecmd.speed =
38428 lp->
38429 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38430- bcr9 = lp->a.read_bcr(ioaddr, 9);
38431+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38432
38433 if (lp->options & PCNET32_PORT_FD) {
38434 ecmd.duplex = DUPLEX_FULL;
38435@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38436 ecmd.duplex = DUPLEX_HALF;
38437 bcr9 |= ~(1 << 0);
38438 }
38439- lp->a.write_bcr(ioaddr, 9, bcr9);
38440+ lp->a->write_bcr(ioaddr, 9, bcr9);
38441 }
38442
38443 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38444@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38445
38446 #ifdef DO_DXSUFLO
38447 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38448- val = lp->a.read_csr(ioaddr, CSR3);
38449+ val = lp->a->read_csr(ioaddr, CSR3);
38450 val |= 0x40;
38451- lp->a.write_csr(ioaddr, CSR3, val);
38452+ lp->a->write_csr(ioaddr, CSR3, val);
38453 }
38454 #endif
38455
38456@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38457 napi_enable(&lp->napi);
38458
38459 /* Re-initialize the PCNET32, and start it when done. */
38460- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38461- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38462+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38463+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38464
38465- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38466- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38467+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38468+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38469
38470 netif_start_queue(dev);
38471
38472@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38473
38474 i = 0;
38475 while (i++ < 100)
38476- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38477+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38478 break;
38479 /*
38480 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38481 * reports that doing so triggers a bug in the '974.
38482 */
38483- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38484+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38485
38486 if (netif_msg_ifup(lp))
38487 printk(KERN_DEBUG
38488 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38489 dev->name, i,
38490 (u32) (lp->init_dma_addr),
38491- lp->a.read_csr(ioaddr, CSR0));
38492+ lp->a->read_csr(ioaddr, CSR0));
38493
38494 spin_unlock_irqrestore(&lp->lock, flags);
38495
38496@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38497 * Switch back to 16bit mode to avoid problems with dumb
38498 * DOS packet driver after a warm reboot
38499 */
38500- lp->a.write_bcr(ioaddr, 20, 4);
38501+ lp->a->write_bcr(ioaddr, 20, 4);
38502
38503 err_free_irq:
38504 spin_unlock_irqrestore(&lp->lock, flags);
38505@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38506
38507 /* wait for stop */
38508 for (i = 0; i < 100; i++)
38509- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38510+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38511 break;
38512
38513 if (i >= 100 && netif_msg_drv(lp))
38514@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38515 return;
38516
38517 /* ReInit Ring */
38518- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38519+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38520 i = 0;
38521 while (i++ < 1000)
38522- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38523+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38524 break;
38525
38526- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38527+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38528 }
38529
38530 static void pcnet32_tx_timeout(struct net_device *dev)
38531@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38532 if (pcnet32_debug & NETIF_MSG_DRV)
38533 printk(KERN_ERR
38534 "%s: transmit timed out, status %4.4x, resetting.\n",
38535- dev->name, lp->a.read_csr(ioaddr, CSR0));
38536- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38537+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38538+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38539 dev->stats.tx_errors++;
38540 if (netif_msg_tx_err(lp)) {
38541 int i;
38542@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38543 if (netif_msg_tx_queued(lp)) {
38544 printk(KERN_DEBUG
38545 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38546- dev->name, lp->a.read_csr(ioaddr, CSR0));
38547+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38548 }
38549
38550 /* Default status -- will not enable Successful-TxDone
38551@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38552 dev->stats.tx_bytes += skb->len;
38553
38554 /* Trigger an immediate send poll. */
38555- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38556+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38557
38558 dev->trans_start = jiffies;
38559
38560@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38561
38562 spin_lock(&lp->lock);
38563
38564- csr0 = lp->a.read_csr(ioaddr, CSR0);
38565+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38566 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38567 if (csr0 == 0xffff) {
38568 break; /* PCMCIA remove happened */
38569 }
38570 /* Acknowledge all of the current interrupt sources ASAP. */
38571- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38572+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38573
38574 if (netif_msg_intr(lp))
38575 printk(KERN_DEBUG
38576 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38577- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38578+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38579
38580 /* Log misc errors. */
38581 if (csr0 & 0x4000)
38582@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38583 if (napi_schedule_prep(&lp->napi)) {
38584 u16 val;
38585 /* set interrupt masks */
38586- val = lp->a.read_csr(ioaddr, CSR3);
38587+ val = lp->a->read_csr(ioaddr, CSR3);
38588 val |= 0x5f00;
38589- lp->a.write_csr(ioaddr, CSR3, val);
38590+ lp->a->write_csr(ioaddr, CSR3, val);
38591
38592 __napi_schedule(&lp->napi);
38593 break;
38594 }
38595- csr0 = lp->a.read_csr(ioaddr, CSR0);
38596+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38597 }
38598
38599 if (netif_msg_intr(lp))
38600 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38601- dev->name, lp->a.read_csr(ioaddr, CSR0));
38602+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38603
38604 spin_unlock(&lp->lock);
38605
38606@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38607
38608 spin_lock_irqsave(&lp->lock, flags);
38609
38610- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38611+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38612
38613 if (netif_msg_ifdown(lp))
38614 printk(KERN_DEBUG
38615 "%s: Shutting down ethercard, status was %2.2x.\n",
38616- dev->name, lp->a.read_csr(ioaddr, CSR0));
38617+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38618
38619 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38620- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38621+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38622
38623 /*
38624 * Switch back to 16bit mode to avoid problems with dumb
38625 * DOS packet driver after a warm reboot
38626 */
38627- lp->a.write_bcr(ioaddr, 20, 4);
38628+ lp->a->write_bcr(ioaddr, 20, 4);
38629
38630 spin_unlock_irqrestore(&lp->lock, flags);
38631
38632@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38633 unsigned long flags;
38634
38635 spin_lock_irqsave(&lp->lock, flags);
38636- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38637+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38638 spin_unlock_irqrestore(&lp->lock, flags);
38639
38640 return &dev->stats;
38641@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38642 if (dev->flags & IFF_ALLMULTI) {
38643 ib->filter[0] = cpu_to_le32(~0U);
38644 ib->filter[1] = cpu_to_le32(~0U);
38645- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38646- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38647- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38648- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38649+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38650+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38651+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38652+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38653 return;
38654 }
38655 /* clear the multicast filter */
38656@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
38657 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
38658 }
38659 for (i = 0; i < 4; i++)
38660- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
38661+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
38662 le16_to_cpu(mcast_table[i]));
38663 return;
38664 }
38665@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38666
38667 spin_lock_irqsave(&lp->lock, flags);
38668 suspended = pcnet32_suspend(dev, &flags, 0);
38669- csr15 = lp->a.read_csr(ioaddr, CSR15);
38670+ csr15 = lp->a->read_csr(ioaddr, CSR15);
38671 if (dev->flags & IFF_PROMISC) {
38672 /* Log any net taps. */
38673 if (netif_msg_hw(lp))
38674@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38675 lp->init_block->mode =
38676 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
38677 7);
38678- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
38679+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
38680 } else {
38681 lp->init_block->mode =
38682 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
38683- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38684+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38685 pcnet32_load_multicast(dev);
38686 }
38687
38688 if (suspended) {
38689 int csr5;
38690 /* clear SUSPEND (SPND) - CSR5 bit 0 */
38691- csr5 = lp->a.read_csr(ioaddr, CSR5);
38692- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38693+ csr5 = lp->a->read_csr(ioaddr, CSR5);
38694+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38695 } else {
38696- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38697+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38698 pcnet32_restart(dev, CSR0_NORMAL);
38699 netif_wake_queue(dev);
38700 }
38701@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
38702 if (!lp->mii)
38703 return 0;
38704
38705- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38706- val_out = lp->a.read_bcr(ioaddr, 34);
38707+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38708+ val_out = lp->a->read_bcr(ioaddr, 34);
38709
38710 return val_out;
38711 }
38712@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
38713 if (!lp->mii)
38714 return;
38715
38716- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38717- lp->a.write_bcr(ioaddr, 34, val);
38718+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38719+ lp->a->write_bcr(ioaddr, 34, val);
38720 }
38721
38722 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38723@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38724 curr_link = mii_link_ok(&lp->mii_if);
38725 } else {
38726 ulong ioaddr = dev->base_addr; /* card base I/O address */
38727- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38728+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38729 }
38730 if (!curr_link) {
38731 if (prev_link || verbose) {
38732@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38733 (ecmd.duplex ==
38734 DUPLEX_FULL) ? "full" : "half");
38735 }
38736- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
38737+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
38738 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
38739 if (lp->mii_if.full_duplex)
38740 bcr9 |= (1 << 0);
38741 else
38742 bcr9 &= ~(1 << 0);
38743- lp->a.write_bcr(dev->base_addr, 9, bcr9);
38744+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
38745 }
38746 } else {
38747 if (netif_msg_link(lp))
38748diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
38749index 7cc9898..6eb50d3 100644
38750--- a/drivers/net/sis190.c
38751+++ b/drivers/net/sis190.c
38752@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
38753 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
38754 struct net_device *dev)
38755 {
38756- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
38757+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
38758 struct sis190_private *tp = netdev_priv(dev);
38759 struct pci_dev *isa_bridge;
38760 u8 reg, tmp8;
38761diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
38762index e13685a..60c948c 100644
38763--- a/drivers/net/sundance.c
38764+++ b/drivers/net/sundance.c
38765@@ -225,7 +225,7 @@ enum {
38766 struct pci_id_info {
38767 const char *name;
38768 };
38769-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
38770+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
38771 {"D-Link DFE-550TX FAST Ethernet Adapter"},
38772 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
38773 {"D-Link DFE-580TX 4 port Server Adapter"},
38774diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
38775index 529f55a..cccaa18 100644
38776--- a/drivers/net/tg3.h
38777+++ b/drivers/net/tg3.h
38778@@ -95,6 +95,7 @@
38779 #define CHIPREV_ID_5750_A0 0x4000
38780 #define CHIPREV_ID_5750_A1 0x4001
38781 #define CHIPREV_ID_5750_A3 0x4003
38782+#define CHIPREV_ID_5750_C1 0x4201
38783 #define CHIPREV_ID_5750_C2 0x4202
38784 #define CHIPREV_ID_5752_A0_HW 0x5000
38785 #define CHIPREV_ID_5752_A0 0x6000
38786diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
38787index b9db1b5..720f9ce 100644
38788--- a/drivers/net/tokenring/abyss.c
38789+++ b/drivers/net/tokenring/abyss.c
38790@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
38791
38792 static int __init abyss_init (void)
38793 {
38794- abyss_netdev_ops = tms380tr_netdev_ops;
38795+ pax_open_kernel();
38796+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38797
38798- abyss_netdev_ops.ndo_open = abyss_open;
38799- abyss_netdev_ops.ndo_stop = abyss_close;
38800+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
38801+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
38802+ pax_close_kernel();
38803
38804 return pci_register_driver(&abyss_driver);
38805 }
38806diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
38807index 456f8bf..373e56d 100644
38808--- a/drivers/net/tokenring/madgemc.c
38809+++ b/drivers/net/tokenring/madgemc.c
38810@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
38811
38812 static int __init madgemc_init (void)
38813 {
38814- madgemc_netdev_ops = tms380tr_netdev_ops;
38815- madgemc_netdev_ops.ndo_open = madgemc_open;
38816- madgemc_netdev_ops.ndo_stop = madgemc_close;
38817+ pax_open_kernel();
38818+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38819+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
38820+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
38821+ pax_close_kernel();
38822
38823 return mca_register_driver (&madgemc_driver);
38824 }
38825diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
38826index 16e8783..925bd49 100644
38827--- a/drivers/net/tokenring/proteon.c
38828+++ b/drivers/net/tokenring/proteon.c
38829@@ -353,9 +353,11 @@ static int __init proteon_init(void)
38830 struct platform_device *pdev;
38831 int i, num = 0, err = 0;
38832
38833- proteon_netdev_ops = tms380tr_netdev_ops;
38834- proteon_netdev_ops.ndo_open = proteon_open;
38835- proteon_netdev_ops.ndo_stop = tms380tr_close;
38836+ pax_open_kernel();
38837+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38838+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
38839+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
38840+ pax_close_kernel();
38841
38842 err = platform_driver_register(&proteon_driver);
38843 if (err)
38844diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
38845index 46db5c5..37c1536 100644
38846--- a/drivers/net/tokenring/skisa.c
38847+++ b/drivers/net/tokenring/skisa.c
38848@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
38849 struct platform_device *pdev;
38850 int i, num = 0, err = 0;
38851
38852- sk_isa_netdev_ops = tms380tr_netdev_ops;
38853- sk_isa_netdev_ops.ndo_open = sk_isa_open;
38854- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38855+ pax_open_kernel();
38856+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38857+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
38858+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38859+ pax_close_kernel();
38860
38861 err = platform_driver_register(&sk_isa_driver);
38862 if (err)
38863diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
38864index 74e5ba4..5cf6bc9 100644
38865--- a/drivers/net/tulip/de2104x.c
38866+++ b/drivers/net/tulip/de2104x.c
38867@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
38868 struct de_srom_info_leaf *il;
38869 void *bufp;
38870
38871+ pax_track_stack();
38872+
38873 /* download entire eeprom */
38874 for (i = 0; i < DE_EEPROM_WORDS; i++)
38875 ((__le16 *)ee_data)[i] =
38876diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
38877index a8349b7..90f9dfe 100644
38878--- a/drivers/net/tulip/de4x5.c
38879+++ b/drivers/net/tulip/de4x5.c
38880@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38881 for (i=0; i<ETH_ALEN; i++) {
38882 tmp.addr[i] = dev->dev_addr[i];
38883 }
38884- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38885+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38886 break;
38887
38888 case DE4X5_SET_HWADDR: /* Set the hardware address */
38889@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38890 spin_lock_irqsave(&lp->lock, flags);
38891 memcpy(&statbuf, &lp->pktStats, ioc->len);
38892 spin_unlock_irqrestore(&lp->lock, flags);
38893- if (copy_to_user(ioc->data, &statbuf, ioc->len))
38894+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
38895 return -EFAULT;
38896 break;
38897 }
38898diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
38899index 391acd3..56d11cd 100644
38900--- a/drivers/net/tulip/eeprom.c
38901+++ b/drivers/net/tulip/eeprom.c
38902@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
38903 {NULL}};
38904
38905
38906-static const char *block_name[] __devinitdata = {
38907+static const char *block_name[] __devinitconst = {
38908 "21140 non-MII",
38909 "21140 MII PHY",
38910 "21142 Serial PHY",
38911diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
38912index b38d3b7..b1cff23 100644
38913--- a/drivers/net/tulip/winbond-840.c
38914+++ b/drivers/net/tulip/winbond-840.c
38915@@ -235,7 +235,7 @@ struct pci_id_info {
38916 int drv_flags; /* Driver use, intended as capability flags. */
38917 };
38918
38919-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
38920+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
38921 { /* Sometime a Level-One switch card. */
38922 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
38923 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
38924diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
38925index f450bc9..2b747c8 100644
38926--- a/drivers/net/usb/hso.c
38927+++ b/drivers/net/usb/hso.c
38928@@ -71,7 +71,7 @@
38929 #include <asm/byteorder.h>
38930 #include <linux/serial_core.h>
38931 #include <linux/serial.h>
38932-
38933+#include <asm/local.h>
38934
38935 #define DRIVER_VERSION "1.2"
38936 #define MOD_AUTHOR "Option Wireless"
38937@@ -258,7 +258,7 @@ struct hso_serial {
38938
38939 /* from usb_serial_port */
38940 struct tty_struct *tty;
38941- int open_count;
38942+ local_t open_count;
38943 spinlock_t serial_lock;
38944
38945 int (*write_data) (struct hso_serial *serial);
38946@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
38947 struct urb *urb;
38948
38949 urb = serial->rx_urb[0];
38950- if (serial->open_count > 0) {
38951+ if (local_read(&serial->open_count) > 0) {
38952 count = put_rxbuf_data(urb, serial);
38953 if (count == -1)
38954 return;
38955@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
38956 DUMP1(urb->transfer_buffer, urb->actual_length);
38957
38958 /* Anyone listening? */
38959- if (serial->open_count == 0)
38960+ if (local_read(&serial->open_count) == 0)
38961 return;
38962
38963 if (status == 0) {
38964@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
38965 spin_unlock_irq(&serial->serial_lock);
38966
38967 /* check for port already opened, if not set the termios */
38968- serial->open_count++;
38969- if (serial->open_count == 1) {
38970+ if (local_inc_return(&serial->open_count) == 1) {
38971 tty->low_latency = 1;
38972 serial->rx_state = RX_IDLE;
38973 /* Force default termio settings */
38974@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
38975 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
38976 if (result) {
38977 hso_stop_serial_device(serial->parent);
38978- serial->open_count--;
38979+ local_dec(&serial->open_count);
38980 kref_put(&serial->parent->ref, hso_serial_ref_free);
38981 }
38982 } else {
38983@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
38984
38985 /* reset the rts and dtr */
38986 /* do the actual close */
38987- serial->open_count--;
38988+ local_dec(&serial->open_count);
38989
38990- if (serial->open_count <= 0) {
38991- serial->open_count = 0;
38992+ if (local_read(&serial->open_count) <= 0) {
38993+ local_set(&serial->open_count, 0);
38994 spin_lock_irq(&serial->serial_lock);
38995 if (serial->tty == tty) {
38996 serial->tty->driver_data = NULL;
38997@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
38998
38999 /* the actual setup */
39000 spin_lock_irqsave(&serial->serial_lock, flags);
39001- if (serial->open_count)
39002+ if (local_read(&serial->open_count))
39003 _hso_serial_set_termios(tty, old);
39004 else
39005 tty->termios = old;
39006@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39007 /* Start all serial ports */
39008 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39009 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39010- if (dev2ser(serial_table[i])->open_count) {
39011+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39012 result =
39013 hso_start_serial_device(serial_table[i], GFP_NOIO);
39014 hso_kick_transmit(dev2ser(serial_table[i]));
39015diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39016index 3e94f0c..ffdd926 100644
39017--- a/drivers/net/vxge/vxge-config.h
39018+++ b/drivers/net/vxge/vxge-config.h
39019@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39020 void (*link_down)(struct __vxge_hw_device *devh);
39021 void (*crit_err)(struct __vxge_hw_device *devh,
39022 enum vxge_hw_event type, u64 ext_data);
39023-};
39024+} __no_const;
39025
39026 /*
39027 * struct __vxge_hw_blockpool_entry - Block private data structure
39028diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39029index 068d7a9..35293de 100644
39030--- a/drivers/net/vxge/vxge-main.c
39031+++ b/drivers/net/vxge/vxge-main.c
39032@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39033 struct sk_buff *completed[NR_SKB_COMPLETED];
39034 int more;
39035
39036+ pax_track_stack();
39037+
39038 do {
39039 more = 0;
39040 skb_ptr = completed;
39041@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39042 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39043 int index;
39044
39045+ pax_track_stack();
39046+
39047 /*
39048 * Filling
39049 * - itable with bucket numbers
39050diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39051index 461742b..81be42e 100644
39052--- a/drivers/net/vxge/vxge-traffic.h
39053+++ b/drivers/net/vxge/vxge-traffic.h
39054@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39055 struct vxge_hw_mempool_dma *dma_object,
39056 u32 index,
39057 u32 is_last);
39058-};
39059+} __no_const;
39060
39061 void
39062 __vxge_hw_mempool_destroy(
39063diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39064index cd8cb95..4153b79 100644
39065--- a/drivers/net/wan/cycx_x25.c
39066+++ b/drivers/net/wan/cycx_x25.c
39067@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39068 unsigned char hex[1024],
39069 * phex = hex;
39070
39071+ pax_track_stack();
39072+
39073 if (len >= (sizeof(hex) / 2))
39074 len = (sizeof(hex) / 2) - 1;
39075
39076diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39077index aa9248f..a4e3c3b 100644
39078--- a/drivers/net/wan/hdlc_x25.c
39079+++ b/drivers/net/wan/hdlc_x25.c
39080@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39081
39082 static int x25_open(struct net_device *dev)
39083 {
39084- struct lapb_register_struct cb;
39085+ static struct lapb_register_struct cb = {
39086+ .connect_confirmation = x25_connected,
39087+ .connect_indication = x25_connected,
39088+ .disconnect_confirmation = x25_disconnected,
39089+ .disconnect_indication = x25_disconnected,
39090+ .data_indication = x25_data_indication,
39091+ .data_transmit = x25_data_transmit
39092+ };
39093 int result;
39094
39095- cb.connect_confirmation = x25_connected;
39096- cb.connect_indication = x25_connected;
39097- cb.disconnect_confirmation = x25_disconnected;
39098- cb.disconnect_indication = x25_disconnected;
39099- cb.data_indication = x25_data_indication;
39100- cb.data_transmit = x25_data_transmit;
39101-
39102 result = lapb_register(dev, &cb);
39103 if (result != LAPB_OK)
39104 return result;
39105diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39106index 5ad287c..783b020 100644
39107--- a/drivers/net/wimax/i2400m/usb-fw.c
39108+++ b/drivers/net/wimax/i2400m/usb-fw.c
39109@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39110 int do_autopm = 1;
39111 DECLARE_COMPLETION_ONSTACK(notif_completion);
39112
39113+ pax_track_stack();
39114+
39115 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39116 i2400m, ack, ack_size);
39117 BUG_ON(_ack == i2400m->bm_ack_buf);
39118diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39119index 6c26840..62c97c3 100644
39120--- a/drivers/net/wireless/airo.c
39121+++ b/drivers/net/wireless/airo.c
39122@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39123 BSSListElement * loop_net;
39124 BSSListElement * tmp_net;
39125
39126+ pax_track_stack();
39127+
39128 /* Blow away current list of scan results */
39129 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39130 list_move_tail (&loop_net->list, &ai->network_free_list);
39131@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39132 WepKeyRid wkr;
39133 int rc;
39134
39135+ pax_track_stack();
39136+
39137 memset( &mySsid, 0, sizeof( mySsid ) );
39138 kfree (ai->flash);
39139 ai->flash = NULL;
39140@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39141 __le32 *vals = stats.vals;
39142 int len;
39143
39144+ pax_track_stack();
39145+
39146 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39147 return -ENOMEM;
39148 data = (struct proc_data *)file->private_data;
39149@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39150 /* If doLoseSync is not 1, we won't do a Lose Sync */
39151 int doLoseSync = -1;
39152
39153+ pax_track_stack();
39154+
39155 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39156 return -ENOMEM;
39157 data = (struct proc_data *)file->private_data;
39158@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39159 int i;
39160 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39161
39162+ pax_track_stack();
39163+
39164 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39165 if (!qual)
39166 return -ENOMEM;
39167@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39168 CapabilityRid cap_rid;
39169 __le32 *vals = stats_rid.vals;
39170
39171+ pax_track_stack();
39172+
39173 /* Get stats out of the card */
39174 clear_bit(JOB_WSTATS, &local->jobs);
39175 if (local->power.event) {
39176diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39177index 747508c..82e965d 100644
39178--- a/drivers/net/wireless/ath/ath5k/debug.c
39179+++ b/drivers/net/wireless/ath/ath5k/debug.c
39180@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39181 unsigned int v;
39182 u64 tsf;
39183
39184+ pax_track_stack();
39185+
39186 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39187 len += snprintf(buf+len, sizeof(buf)-len,
39188 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39189@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39190 unsigned int len = 0;
39191 unsigned int i;
39192
39193+ pax_track_stack();
39194+
39195 len += snprintf(buf+len, sizeof(buf)-len,
39196 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39197
39198diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39199index 2be4c22..593b1eb 100644
39200--- a/drivers/net/wireless/ath/ath9k/debug.c
39201+++ b/drivers/net/wireless/ath/ath9k/debug.c
39202@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39203 char buf[512];
39204 unsigned int len = 0;
39205
39206+ pax_track_stack();
39207+
39208 len += snprintf(buf + len, sizeof(buf) - len,
39209 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39210 len += snprintf(buf + len, sizeof(buf) - len,
39211@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39212 int i;
39213 u8 addr[ETH_ALEN];
39214
39215+ pax_track_stack();
39216+
39217 len += snprintf(buf + len, sizeof(buf) - len,
39218 "primary: %s (%s chan=%d ht=%d)\n",
39219 wiphy_name(sc->pri_wiphy->hw->wiphy),
39220diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39221index 80b19a4..dab3a45 100644
39222--- a/drivers/net/wireless/b43/debugfs.c
39223+++ b/drivers/net/wireless/b43/debugfs.c
39224@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39225 struct b43_debugfs_fops {
39226 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39227 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39228- struct file_operations fops;
39229+ const struct file_operations fops;
39230 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39231 size_t file_struct_offset;
39232 };
39233diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39234index 1f85ac5..c99b4b4 100644
39235--- a/drivers/net/wireless/b43legacy/debugfs.c
39236+++ b/drivers/net/wireless/b43legacy/debugfs.c
39237@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39238 struct b43legacy_debugfs_fops {
39239 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39240 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39241- struct file_operations fops;
39242+ const struct file_operations fops;
39243 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39244 size_t file_struct_offset;
39245 /* Take wl->irq_lock before calling read/write? */
39246diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39247index 43102bf..3b569c3 100644
39248--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39249+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39250@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39251 int err;
39252 DECLARE_SSID_BUF(ssid);
39253
39254+ pax_track_stack();
39255+
39256 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39257
39258 if (ssid_len)
39259@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39260 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39261 int err;
39262
39263+ pax_track_stack();
39264+
39265 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39266 idx, keylen, len);
39267
39268diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39269index 282b1f7..169f0cf 100644
39270--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39271+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39272@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39273 unsigned long flags;
39274 DECLARE_SSID_BUF(ssid);
39275
39276+ pax_track_stack();
39277+
39278 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39279 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39280 print_ssid(ssid, info_element->data, info_element->len),
39281diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39282index 950267a..80d5fd2 100644
39283--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39284+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39285@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39286 },
39287 };
39288
39289-static struct iwl_ops iwl1000_ops = {
39290+static const struct iwl_ops iwl1000_ops = {
39291 .ucode = &iwl5000_ucode,
39292 .lib = &iwl1000_lib,
39293 .hcmd = &iwl5000_hcmd,
39294diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39295index 56bfcc3..b348020 100644
39296--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39297+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39298@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39299 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39300 };
39301
39302-static struct iwl_ops iwl3945_ops = {
39303+static const struct iwl_ops iwl3945_ops = {
39304 .ucode = &iwl3945_ucode,
39305 .lib = &iwl3945_lib,
39306 .hcmd = &iwl3945_hcmd,
39307diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39308index 585b8d4..e142963 100644
39309--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39310+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39311@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39312 },
39313 };
39314
39315-static struct iwl_ops iwl4965_ops = {
39316+static const struct iwl_ops iwl4965_ops = {
39317 .ucode = &iwl4965_ucode,
39318 .lib = &iwl4965_lib,
39319 .hcmd = &iwl4965_hcmd,
39320diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39321index 1f423f2..e37c192 100644
39322--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39323+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39324@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39325 },
39326 };
39327
39328-struct iwl_ops iwl5000_ops = {
39329+const struct iwl_ops iwl5000_ops = {
39330 .ucode = &iwl5000_ucode,
39331 .lib = &iwl5000_lib,
39332 .hcmd = &iwl5000_hcmd,
39333 .utils = &iwl5000_hcmd_utils,
39334 };
39335
39336-static struct iwl_ops iwl5150_ops = {
39337+static const struct iwl_ops iwl5150_ops = {
39338 .ucode = &iwl5000_ucode,
39339 .lib = &iwl5150_lib,
39340 .hcmd = &iwl5000_hcmd,
39341diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39342index 1473452..f07d5e1 100644
39343--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39344+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39345@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39346 .calc_rssi = iwl5000_calc_rssi,
39347 };
39348
39349-static struct iwl_ops iwl6000_ops = {
39350+static const struct iwl_ops iwl6000_ops = {
39351 .ucode = &iwl5000_ucode,
39352 .lib = &iwl6000_lib,
39353 .hcmd = &iwl5000_hcmd,
39354diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39355index 1a3dfa2..b3e0a61 100644
39356--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39357+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39358@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39359 u8 active_index = 0;
39360 s32 tpt = 0;
39361
39362+ pax_track_stack();
39363+
39364 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39365
39366 if (!ieee80211_is_data(hdr->frame_control) ||
39367@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39368 u8 valid_tx_ant = 0;
39369 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39370
39371+ pax_track_stack();
39372+
39373 /* Override starting rate (index 0) if needed for debug purposes */
39374 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39375
39376diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39377index 0e56d78..6a3c107 100644
39378--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39379+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39380@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39381 if (iwl_debug_level & IWL_DL_INFO)
39382 dev_printk(KERN_DEBUG, &(pdev->dev),
39383 "Disabling hw_scan\n");
39384- iwl_hw_ops.hw_scan = NULL;
39385+ pax_open_kernel();
39386+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39387+ pax_close_kernel();
39388 }
39389
39390 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39391diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39392index cbc6290..eb323d7 100644
39393--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39394+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39395@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39396 #endif
39397
39398 #else
39399-#define IWL_DEBUG(__priv, level, fmt, args...)
39400-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39401+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39402+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39403 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39404 void *p, u32 len)
39405 {}
39406diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39407index a198bcf..8e68233 100644
39408--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39409+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39410@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39411 int pos = 0;
39412 const size_t bufsz = sizeof(buf);
39413
39414+ pax_track_stack();
39415+
39416 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39417 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39418 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39419@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39420 const size_t bufsz = sizeof(buf);
39421 ssize_t ret;
39422
39423+ pax_track_stack();
39424+
39425 for (i = 0; i < AC_NUM; i++) {
39426 pos += scnprintf(buf + pos, bufsz - pos,
39427 "\tcw_min\tcw_max\taifsn\ttxop\n");
39428diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39429index 3539ea4..b174bfa 100644
39430--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39431+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39432@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39433
39434 /* shared structures from iwl-5000.c */
39435 extern struct iwl_mod_params iwl50_mod_params;
39436-extern struct iwl_ops iwl5000_ops;
39437+extern const struct iwl_ops iwl5000_ops;
39438 extern struct iwl_ucode_ops iwl5000_ucode;
39439 extern struct iwl_lib_ops iwl5000_lib;
39440 extern struct iwl_hcmd_ops iwl5000_hcmd;
39441diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39442index 619590d..69235ee 100644
39443--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39444+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39445@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39446 */
39447 if (iwl3945_mod_params.disable_hw_scan) {
39448 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39449- iwl3945_hw_ops.hw_scan = NULL;
39450+ pax_open_kernel();
39451+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39452+ pax_close_kernel();
39453 }
39454
39455
39456diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39457index 1465379..fe4d78b 100644
39458--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39459+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39460@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39461 int buf_len = 512;
39462 size_t len = 0;
39463
39464+ pax_track_stack();
39465+
39466 if (*ppos != 0)
39467 return 0;
39468 if (count < sizeof(buf))
39469diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39470index 893a55c..7f66a50 100644
39471--- a/drivers/net/wireless/libertas/debugfs.c
39472+++ b/drivers/net/wireless/libertas/debugfs.c
39473@@ -708,7 +708,7 @@ out_unlock:
39474 struct lbs_debugfs_files {
39475 const char *name;
39476 int perm;
39477- struct file_operations fops;
39478+ const struct file_operations fops;
39479 };
39480
39481 static const struct lbs_debugfs_files debugfs_files[] = {
39482diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39483index 2ecbedb..42704f0 100644
39484--- a/drivers/net/wireless/rndis_wlan.c
39485+++ b/drivers/net/wireless/rndis_wlan.c
39486@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39487
39488 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39489
39490- if (rts_threshold < 0 || rts_threshold > 2347)
39491+ if (rts_threshold > 2347)
39492 rts_threshold = 2347;
39493
39494 tmp = cpu_to_le32(rts_threshold);
39495diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39496index 5c4df24..3b42925 100644
39497--- a/drivers/oprofile/buffer_sync.c
39498+++ b/drivers/oprofile/buffer_sync.c
39499@@ -341,7 +341,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39500 if (cookie == NO_COOKIE)
39501 offset = pc;
39502 if (cookie == INVALID_COOKIE) {
39503- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39504+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39505 offset = pc;
39506 }
39507 if (cookie != last_cookie) {
39508@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39509 /* add userspace sample */
39510
39511 if (!mm) {
39512- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39513+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39514 return 0;
39515 }
39516
39517 cookie = lookup_dcookie(mm, s->eip, &offset);
39518
39519 if (cookie == INVALID_COOKIE) {
39520- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39521+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39522 return 0;
39523 }
39524
39525@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
39526 /* ignore backtraces if failed to add a sample */
39527 if (state == sb_bt_start) {
39528 state = sb_bt_ignore;
39529- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39530+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39531 }
39532 }
39533 release_mm(mm);
39534diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39535index 5df60a6..72f5c1c 100644
39536--- a/drivers/oprofile/event_buffer.c
39537+++ b/drivers/oprofile/event_buffer.c
39538@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39539 }
39540
39541 if (buffer_pos == buffer_size) {
39542- atomic_inc(&oprofile_stats.event_lost_overflow);
39543+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39544 return;
39545 }
39546
39547diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39548index dc8a042..fe5f315 100644
39549--- a/drivers/oprofile/oprof.c
39550+++ b/drivers/oprofile/oprof.c
39551@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39552 if (oprofile_ops.switch_events())
39553 return;
39554
39555- atomic_inc(&oprofile_stats.multiplex_counter);
39556+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39557 start_switch_worker();
39558 }
39559
39560diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39561index 61689e8..387f7f8 100644
39562--- a/drivers/oprofile/oprofile_stats.c
39563+++ b/drivers/oprofile/oprofile_stats.c
39564@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39565 cpu_buf->sample_invalid_eip = 0;
39566 }
39567
39568- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39569- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39570- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39571- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39572- atomic_set(&oprofile_stats.multiplex_counter, 0);
39573+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39574+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39575+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39576+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39577+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39578 }
39579
39580
39581diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39582index 0b54e46..a37c527 100644
39583--- a/drivers/oprofile/oprofile_stats.h
39584+++ b/drivers/oprofile/oprofile_stats.h
39585@@ -13,11 +13,11 @@
39586 #include <asm/atomic.h>
39587
39588 struct oprofile_stat_struct {
39589- atomic_t sample_lost_no_mm;
39590- atomic_t sample_lost_no_mapping;
39591- atomic_t bt_lost_no_mapping;
39592- atomic_t event_lost_overflow;
39593- atomic_t multiplex_counter;
39594+ atomic_unchecked_t sample_lost_no_mm;
39595+ atomic_unchecked_t sample_lost_no_mapping;
39596+ atomic_unchecked_t bt_lost_no_mapping;
39597+ atomic_unchecked_t event_lost_overflow;
39598+ atomic_unchecked_t multiplex_counter;
39599 };
39600
39601 extern struct oprofile_stat_struct oprofile_stats;
39602diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39603index 2766a6d..80c77e2 100644
39604--- a/drivers/oprofile/oprofilefs.c
39605+++ b/drivers/oprofile/oprofilefs.c
39606@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39607
39608
39609 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39610- char const *name, atomic_t *val)
39611+ char const *name, atomic_unchecked_t *val)
39612 {
39613 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39614 &atomic_ro_fops, 0444);
39615diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39616index 13a64bc..ad62835 100644
39617--- a/drivers/parisc/pdc_stable.c
39618+++ b/drivers/parisc/pdc_stable.c
39619@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39620 return ret;
39621 }
39622
39623-static struct sysfs_ops pdcspath_attr_ops = {
39624+static const struct sysfs_ops pdcspath_attr_ops = {
39625 .show = pdcspath_attr_show,
39626 .store = pdcspath_attr_store,
39627 };
39628diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39629index 8eefe56..40751a7 100644
39630--- a/drivers/parport/procfs.c
39631+++ b/drivers/parport/procfs.c
39632@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39633
39634 *ppos += len;
39635
39636- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39637+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39638 }
39639
39640 #ifdef CONFIG_PARPORT_1284
39641@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39642
39643 *ppos += len;
39644
39645- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
39646+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
39647 }
39648 #endif /* IEEE1284.3 support. */
39649
39650diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
39651index 73e7d8e..c80f3d2 100644
39652--- a/drivers/pci/hotplug/acpiphp_glue.c
39653+++ b/drivers/pci/hotplug/acpiphp_glue.c
39654@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
39655 }
39656
39657
39658-static struct acpi_dock_ops acpiphp_dock_ops = {
39659+static const struct acpi_dock_ops acpiphp_dock_ops = {
39660 .handler = handle_hotplug_event_func,
39661 };
39662
39663diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
39664index 9fff878..ad0ad53 100644
39665--- a/drivers/pci/hotplug/cpci_hotplug.h
39666+++ b/drivers/pci/hotplug/cpci_hotplug.h
39667@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
39668 int (*hardware_test) (struct slot* slot, u32 value);
39669 u8 (*get_power) (struct slot* slot);
39670 int (*set_power) (struct slot* slot, int value);
39671-};
39672+} __no_const;
39673
39674 struct cpci_hp_controller {
39675 unsigned int irq;
39676diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
39677index 76ba8a1..20ca857 100644
39678--- a/drivers/pci/hotplug/cpqphp_nvram.c
39679+++ b/drivers/pci/hotplug/cpqphp_nvram.c
39680@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
39681
39682 void compaq_nvram_init (void __iomem *rom_start)
39683 {
39684+
39685+#ifndef CONFIG_PAX_KERNEXEC
39686 if (rom_start) {
39687 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
39688 }
39689+#endif
39690+
39691 dbg("int15 entry = %p\n", compaq_int15_entry_point);
39692
39693 /* initialize our int15 lock */
39694diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
39695index 6151389..0a894ef 100644
39696--- a/drivers/pci/hotplug/fakephp.c
39697+++ b/drivers/pci/hotplug/fakephp.c
39698@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
39699 }
39700
39701 static struct kobj_type legacy_ktype = {
39702- .sysfs_ops = &(struct sysfs_ops){
39703+ .sysfs_ops = &(const struct sysfs_ops){
39704 .store = legacy_store, .show = legacy_show
39705 },
39706 .release = &legacy_release,
39707diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
39708index 5b680df..fe05b7e 100644
39709--- a/drivers/pci/intel-iommu.c
39710+++ b/drivers/pci/intel-iommu.c
39711@@ -2643,7 +2643,7 @@ error:
39712 return 0;
39713 }
39714
39715-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
39716+dma_addr_t intel_map_page(struct device *dev, struct page *page,
39717 unsigned long offset, size_t size,
39718 enum dma_data_direction dir,
39719 struct dma_attrs *attrs)
39720@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
39721 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
39722 }
39723
39724-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39725+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39726 size_t size, enum dma_data_direction dir,
39727 struct dma_attrs *attrs)
39728 {
39729@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39730 }
39731 }
39732
39733-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39734+void *intel_alloc_coherent(struct device *hwdev, size_t size,
39735 dma_addr_t *dma_handle, gfp_t flags)
39736 {
39737 void *vaddr;
39738@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39739 return NULL;
39740 }
39741
39742-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39743+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39744 dma_addr_t dma_handle)
39745 {
39746 int order;
39747@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39748 free_pages((unsigned long)vaddr, order);
39749 }
39750
39751-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39752+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39753 int nelems, enum dma_data_direction dir,
39754 struct dma_attrs *attrs)
39755 {
39756@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
39757 return nelems;
39758 }
39759
39760-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39761+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39762 enum dma_data_direction dir, struct dma_attrs *attrs)
39763 {
39764 int i;
39765@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
39766 return nelems;
39767 }
39768
39769-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39770+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39771 {
39772 return !dma_addr;
39773 }
39774
39775-struct dma_map_ops intel_dma_ops = {
39776+const struct dma_map_ops intel_dma_ops = {
39777 .alloc_coherent = intel_alloc_coherent,
39778 .free_coherent = intel_free_coherent,
39779 .map_sg = intel_map_sg,
39780diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
39781index 5b7056c..607bc94 100644
39782--- a/drivers/pci/pcie/aspm.c
39783+++ b/drivers/pci/pcie/aspm.c
39784@@ -27,9 +27,9 @@
39785 #define MODULE_PARAM_PREFIX "pcie_aspm."
39786
39787 /* Note: those are not register definitions */
39788-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
39789-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
39790-#define ASPM_STATE_L1 (4) /* L1 state */
39791+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
39792+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
39793+#define ASPM_STATE_L1 (4U) /* L1 state */
39794 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
39795 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
39796
39797diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
39798index 8105e32..ca10419 100644
39799--- a/drivers/pci/probe.c
39800+++ b/drivers/pci/probe.c
39801@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
39802 return ret;
39803 }
39804
39805-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
39806+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
39807 struct device_attribute *attr,
39808 char *buf)
39809 {
39810 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
39811 }
39812
39813-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
39814+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
39815 struct device_attribute *attr,
39816 char *buf)
39817 {
39818diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
39819index a03ad8c..024b0da 100644
39820--- a/drivers/pci/proc.c
39821+++ b/drivers/pci/proc.c
39822@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
39823 static int __init pci_proc_init(void)
39824 {
39825 struct pci_dev *dev = NULL;
39826+
39827+#ifdef CONFIG_GRKERNSEC_PROC_ADD
39828+#ifdef CONFIG_GRKERNSEC_PROC_USER
39829+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
39830+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39831+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
39832+#endif
39833+#else
39834 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
39835+#endif
39836 proc_create("devices", 0, proc_bus_pci_dir,
39837 &proc_bus_pci_dev_operations);
39838 proc_initialized = 1;
39839diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
39840index 8c02b6c..5584d8e 100644
39841--- a/drivers/pci/slot.c
39842+++ b/drivers/pci/slot.c
39843@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
39844 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
39845 }
39846
39847-static struct sysfs_ops pci_slot_sysfs_ops = {
39848+static const struct sysfs_ops pci_slot_sysfs_ops = {
39849 .show = pci_slot_attr_show,
39850 .store = pci_slot_attr_store,
39851 };
39852diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
39853index 30cf71d2..50938f1 100644
39854--- a/drivers/pcmcia/pcmcia_ioctl.c
39855+++ b/drivers/pcmcia/pcmcia_ioctl.c
39856@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
39857 return -EFAULT;
39858 }
39859 }
39860- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39861+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39862 if (!buf)
39863 return -ENOMEM;
39864
39865diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
39866index 52183c4..b224c69 100644
39867--- a/drivers/platform/x86/acer-wmi.c
39868+++ b/drivers/platform/x86/acer-wmi.c
39869@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
39870 return 0;
39871 }
39872
39873-static struct backlight_ops acer_bl_ops = {
39874+static const struct backlight_ops acer_bl_ops = {
39875 .get_brightness = read_brightness,
39876 .update_status = update_bl_status,
39877 };
39878diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
39879index 767cb61..a87380b 100644
39880--- a/drivers/platform/x86/asus-laptop.c
39881+++ b/drivers/platform/x86/asus-laptop.c
39882@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
39883 */
39884 static int read_brightness(struct backlight_device *bd);
39885 static int update_bl_status(struct backlight_device *bd);
39886-static struct backlight_ops asusbl_ops = {
39887+static const struct backlight_ops asusbl_ops = {
39888 .get_brightness = read_brightness,
39889 .update_status = update_bl_status,
39890 };
39891diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
39892index d66c07a..a4abaac 100644
39893--- a/drivers/platform/x86/asus_acpi.c
39894+++ b/drivers/platform/x86/asus_acpi.c
39895@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
39896 return 0;
39897 }
39898
39899-static struct backlight_ops asus_backlight_data = {
39900+static const struct backlight_ops asus_backlight_data = {
39901 .get_brightness = read_brightness,
39902 .update_status = set_brightness_status,
39903 };
39904diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
39905index 11003bb..550ff1b 100644
39906--- a/drivers/platform/x86/compal-laptop.c
39907+++ b/drivers/platform/x86/compal-laptop.c
39908@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
39909 return set_lcd_level(b->props.brightness);
39910 }
39911
39912-static struct backlight_ops compalbl_ops = {
39913+static const struct backlight_ops compalbl_ops = {
39914 .get_brightness = bl_get_brightness,
39915 .update_status = bl_update_status,
39916 };
39917diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
39918index 07a74da..9dc99fa 100644
39919--- a/drivers/platform/x86/dell-laptop.c
39920+++ b/drivers/platform/x86/dell-laptop.c
39921@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
39922 return buffer.output[1];
39923 }
39924
39925-static struct backlight_ops dell_ops = {
39926+static const struct backlight_ops dell_ops = {
39927 .get_brightness = dell_get_intensity,
39928 .update_status = dell_send_intensity,
39929 };
39930diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
39931index c533b1c..5c81f22 100644
39932--- a/drivers/platform/x86/eeepc-laptop.c
39933+++ b/drivers/platform/x86/eeepc-laptop.c
39934@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
39935 */
39936 static int read_brightness(struct backlight_device *bd);
39937 static int update_bl_status(struct backlight_device *bd);
39938-static struct backlight_ops eeepcbl_ops = {
39939+static const struct backlight_ops eeepcbl_ops = {
39940 .get_brightness = read_brightness,
39941 .update_status = update_bl_status,
39942 };
39943diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
39944index bcd4ba8..a249b35 100644
39945--- a/drivers/platform/x86/fujitsu-laptop.c
39946+++ b/drivers/platform/x86/fujitsu-laptop.c
39947@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
39948 return ret;
39949 }
39950
39951-static struct backlight_ops fujitsubl_ops = {
39952+static const struct backlight_ops fujitsubl_ops = {
39953 .get_brightness = bl_get_brightness,
39954 .update_status = bl_update_status,
39955 };
39956diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
39957index 759763d..1093ba2 100644
39958--- a/drivers/platform/x86/msi-laptop.c
39959+++ b/drivers/platform/x86/msi-laptop.c
39960@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
39961 return set_lcd_level(b->props.brightness);
39962 }
39963
39964-static struct backlight_ops msibl_ops = {
39965+static const struct backlight_ops msibl_ops = {
39966 .get_brightness = bl_get_brightness,
39967 .update_status = bl_update_status,
39968 };
39969diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
39970index fe7cf01..9012d8d 100644
39971--- a/drivers/platform/x86/panasonic-laptop.c
39972+++ b/drivers/platform/x86/panasonic-laptop.c
39973@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
39974 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
39975 }
39976
39977-static struct backlight_ops pcc_backlight_ops = {
39978+static const struct backlight_ops pcc_backlight_ops = {
39979 .get_brightness = bl_get,
39980 .update_status = bl_set_status,
39981 };
39982diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
39983index a2a742c..b37e25e 100644
39984--- a/drivers/platform/x86/sony-laptop.c
39985+++ b/drivers/platform/x86/sony-laptop.c
39986@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
39987 }
39988
39989 static struct backlight_device *sony_backlight_device;
39990-static struct backlight_ops sony_backlight_ops = {
39991+static const struct backlight_ops sony_backlight_ops = {
39992 .update_status = sony_backlight_update_status,
39993 .get_brightness = sony_backlight_get_brightness,
39994 };
39995diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
39996index 68271ae..5e8fb10 100644
39997--- a/drivers/platform/x86/thinkpad_acpi.c
39998+++ b/drivers/platform/x86/thinkpad_acpi.c
39999@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40000 return 0;
40001 }
40002
40003-void static hotkey_mask_warn_incomplete_mask(void)
40004+static void hotkey_mask_warn_incomplete_mask(void)
40005 {
40006 /* log only what the user can fix... */
40007 const u32 wantedmask = hotkey_driver_mask &
40008@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40009 BACKLIGHT_UPDATE_HOTKEY);
40010 }
40011
40012-static struct backlight_ops ibm_backlight_data = {
40013+static const struct backlight_ops ibm_backlight_data = {
40014 .get_brightness = brightness_get,
40015 .update_status = brightness_update_status,
40016 };
40017diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40018index 51c0a8b..0786629 100644
40019--- a/drivers/platform/x86/toshiba_acpi.c
40020+++ b/drivers/platform/x86/toshiba_acpi.c
40021@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40022 return AE_OK;
40023 }
40024
40025-static struct backlight_ops toshiba_backlight_data = {
40026+static const struct backlight_ops toshiba_backlight_data = {
40027 .get_brightness = get_lcd,
40028 .update_status = set_lcd_status,
40029 };
40030diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40031index fc83783c..cf370d7 100644
40032--- a/drivers/pnp/pnpbios/bioscalls.c
40033+++ b/drivers/pnp/pnpbios/bioscalls.c
40034@@ -60,7 +60,7 @@ do { \
40035 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40036 } while(0)
40037
40038-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40039+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40040 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40041
40042 /*
40043@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40044
40045 cpu = get_cpu();
40046 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40047+
40048+ pax_open_kernel();
40049 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40050+ pax_close_kernel();
40051
40052 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40053 spin_lock_irqsave(&pnp_bios_lock, flags);
40054@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40055 :"memory");
40056 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40057
40058+ pax_open_kernel();
40059 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40060+ pax_close_kernel();
40061+
40062 put_cpu();
40063
40064 /* If we get here and this is set then the PnP BIOS faulted on us. */
40065@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40066 return status;
40067 }
40068
40069-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40070+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40071 {
40072 int i;
40073
40074@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40075 pnp_bios_callpoint.offset = header->fields.pm16offset;
40076 pnp_bios_callpoint.segment = PNP_CS16;
40077
40078+ pax_open_kernel();
40079+
40080 for_each_possible_cpu(i) {
40081 struct desc_struct *gdt = get_cpu_gdt_table(i);
40082 if (!gdt)
40083@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40084 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40085 (unsigned long)__va(header->fields.pm16dseg));
40086 }
40087+
40088+ pax_close_kernel();
40089 }
40090diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40091index ba97654..66b99d4 100644
40092--- a/drivers/pnp/resource.c
40093+++ b/drivers/pnp/resource.c
40094@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40095 return 1;
40096
40097 /* check if the resource is valid */
40098- if (*irq < 0 || *irq > 15)
40099+ if (*irq > 15)
40100 return 0;
40101
40102 /* check if the resource is reserved */
40103@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40104 return 1;
40105
40106 /* check if the resource is valid */
40107- if (*dma < 0 || *dma == 4 || *dma > 7)
40108+ if (*dma == 4 || *dma > 7)
40109 return 0;
40110
40111 /* check if the resource is reserved */
40112diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40113index 62bb981..24a2dc9 100644
40114--- a/drivers/power/bq27x00_battery.c
40115+++ b/drivers/power/bq27x00_battery.c
40116@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40117 struct bq27x00_access_methods {
40118 int (*read)(u8 reg, int *rt_value, int b_single,
40119 struct bq27x00_device_info *di);
40120-};
40121+} __no_const;
40122
40123 struct bq27x00_device_info {
40124 struct device *dev;
40125diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40126index 62227cd..b5b538b 100644
40127--- a/drivers/rtc/rtc-dev.c
40128+++ b/drivers/rtc/rtc-dev.c
40129@@ -14,6 +14,7 @@
40130 #include <linux/module.h>
40131 #include <linux/rtc.h>
40132 #include <linux/sched.h>
40133+#include <linux/grsecurity.h>
40134 #include "rtc-core.h"
40135
40136 static dev_t rtc_devt;
40137@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40138 if (copy_from_user(&tm, uarg, sizeof(tm)))
40139 return -EFAULT;
40140
40141+ gr_log_timechange();
40142+
40143 return rtc_set_time(rtc, &tm);
40144
40145 case RTC_PIE_ON:
40146diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40147index 968e3c7..fbc637a 100644
40148--- a/drivers/s390/cio/qdio_perf.c
40149+++ b/drivers/s390/cio/qdio_perf.c
40150@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40151 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40152 {
40153 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40154- (long)atomic_long_read(&perf_stats.qdio_int));
40155+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40156 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40157- (long)atomic_long_read(&perf_stats.pci_int));
40158+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40159 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40160- (long)atomic_long_read(&perf_stats.thin_int));
40161+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40162 seq_printf(m, "\n");
40163 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40164- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40165+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40166 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40167- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40168+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40169 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40170- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40171- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40172+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40173+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40174 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40175- (long)atomic_long_read(&perf_stats.thinint_inbound),
40176- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40177+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40178+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40179 seq_printf(m, "\n");
40180 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40181- (long)atomic_long_read(&perf_stats.siga_in));
40182+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40183 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40184- (long)atomic_long_read(&perf_stats.siga_out));
40185+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40186 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40187- (long)atomic_long_read(&perf_stats.siga_sync));
40188+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40189 seq_printf(m, "\n");
40190 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40191- (long)atomic_long_read(&perf_stats.inbound_handler));
40192+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40193 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40194- (long)atomic_long_read(&perf_stats.outbound_handler));
40195+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40196 seq_printf(m, "\n");
40197 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40198- (long)atomic_long_read(&perf_stats.fast_requeue));
40199+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40200 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40201- (long)atomic_long_read(&perf_stats.outbound_target_full));
40202+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40203 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40204- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40205+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40206 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40207- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40208+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40209 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40210- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40211+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40212 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40213- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40214- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40215+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40216+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40217 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40218- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40219- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40220+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40221+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40222 seq_printf(m, "\n");
40223 return 0;
40224 }
40225diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40226index ff4504c..b3604c3 100644
40227--- a/drivers/s390/cio/qdio_perf.h
40228+++ b/drivers/s390/cio/qdio_perf.h
40229@@ -13,46 +13,46 @@
40230
40231 struct qdio_perf_stats {
40232 /* interrupt handler calls */
40233- atomic_long_t qdio_int;
40234- atomic_long_t pci_int;
40235- atomic_long_t thin_int;
40236+ atomic_long_unchecked_t qdio_int;
40237+ atomic_long_unchecked_t pci_int;
40238+ atomic_long_unchecked_t thin_int;
40239
40240 /* tasklet runs */
40241- atomic_long_t tasklet_inbound;
40242- atomic_long_t tasklet_outbound;
40243- atomic_long_t tasklet_thinint;
40244- atomic_long_t tasklet_thinint_loop;
40245- atomic_long_t thinint_inbound;
40246- atomic_long_t thinint_inbound_loop;
40247- atomic_long_t thinint_inbound_loop2;
40248+ atomic_long_unchecked_t tasklet_inbound;
40249+ atomic_long_unchecked_t tasklet_outbound;
40250+ atomic_long_unchecked_t tasklet_thinint;
40251+ atomic_long_unchecked_t tasklet_thinint_loop;
40252+ atomic_long_unchecked_t thinint_inbound;
40253+ atomic_long_unchecked_t thinint_inbound_loop;
40254+ atomic_long_unchecked_t thinint_inbound_loop2;
40255
40256 /* signal adapter calls */
40257- atomic_long_t siga_out;
40258- atomic_long_t siga_in;
40259- atomic_long_t siga_sync;
40260+ atomic_long_unchecked_t siga_out;
40261+ atomic_long_unchecked_t siga_in;
40262+ atomic_long_unchecked_t siga_sync;
40263
40264 /* misc */
40265- atomic_long_t inbound_handler;
40266- atomic_long_t outbound_handler;
40267- atomic_long_t fast_requeue;
40268- atomic_long_t outbound_target_full;
40269+ atomic_long_unchecked_t inbound_handler;
40270+ atomic_long_unchecked_t outbound_handler;
40271+ atomic_long_unchecked_t fast_requeue;
40272+ atomic_long_unchecked_t outbound_target_full;
40273
40274 /* for debugging */
40275- atomic_long_t debug_tl_out_timer;
40276- atomic_long_t debug_stop_polling;
40277- atomic_long_t debug_eqbs_all;
40278- atomic_long_t debug_eqbs_incomplete;
40279- atomic_long_t debug_sqbs_all;
40280- atomic_long_t debug_sqbs_incomplete;
40281+ atomic_long_unchecked_t debug_tl_out_timer;
40282+ atomic_long_unchecked_t debug_stop_polling;
40283+ atomic_long_unchecked_t debug_eqbs_all;
40284+ atomic_long_unchecked_t debug_eqbs_incomplete;
40285+ atomic_long_unchecked_t debug_sqbs_all;
40286+ atomic_long_unchecked_t debug_sqbs_incomplete;
40287 };
40288
40289 extern struct qdio_perf_stats perf_stats;
40290 extern int qdio_performance_stats;
40291
40292-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40293+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40294 {
40295 if (qdio_performance_stats)
40296- atomic_long_inc(count);
40297+ atomic_long_inc_unchecked(count);
40298 }
40299
40300 int qdio_setup_perf_stats(void);
40301diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40302index 1ddcf40..a85f062 100644
40303--- a/drivers/scsi/BusLogic.c
40304+++ b/drivers/scsi/BusLogic.c
40305@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40306 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40307 *PrototypeHostAdapter)
40308 {
40309+ pax_track_stack();
40310+
40311 /*
40312 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40313 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40314diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40315index cdbdec9..b7d560b 100644
40316--- a/drivers/scsi/aacraid/aacraid.h
40317+++ b/drivers/scsi/aacraid/aacraid.h
40318@@ -471,7 +471,7 @@ struct adapter_ops
40319 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40320 /* Administrative operations */
40321 int (*adapter_comm)(struct aac_dev * dev, int comm);
40322-};
40323+} __no_const;
40324
40325 /*
40326 * Define which interrupt handler needs to be installed
40327diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40328index a5b8e7b..a6a0e43 100644
40329--- a/drivers/scsi/aacraid/commctrl.c
40330+++ b/drivers/scsi/aacraid/commctrl.c
40331@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40332 u32 actual_fibsize64, actual_fibsize = 0;
40333 int i;
40334
40335+ pax_track_stack();
40336
40337 if (dev->in_reset) {
40338 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40339diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40340index 9b97c3e..f099725 100644
40341--- a/drivers/scsi/aacraid/linit.c
40342+++ b/drivers/scsi/aacraid/linit.c
40343@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40344 #elif defined(__devinitconst)
40345 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40346 #else
40347-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40348+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40349 #endif
40350 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40351 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40352diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40353index 996f722..9127845 100644
40354--- a/drivers/scsi/aic94xx/aic94xx_init.c
40355+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40356@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40357 flash_error_table[i].reason);
40358 }
40359
40360-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40361+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40362 asd_show_update_bios, asd_store_update_bios);
40363
40364 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40365@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40366 .lldd_control_phy = asd_control_phy,
40367 };
40368
40369-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40370+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40371 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40372 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40373 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40374diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40375index 58efd4b..cb48dc7 100644
40376--- a/drivers/scsi/bfa/bfa_ioc.h
40377+++ b/drivers/scsi/bfa/bfa_ioc.h
40378@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40379 bfa_ioc_disable_cbfn_t disable_cbfn;
40380 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40381 bfa_ioc_reset_cbfn_t reset_cbfn;
40382-};
40383+} __no_const;
40384
40385 /**
40386 * Heartbeat failure notification queue element.
40387diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40388index 7ad177e..5503586 100644
40389--- a/drivers/scsi/bfa/bfa_iocfc.h
40390+++ b/drivers/scsi/bfa/bfa_iocfc.h
40391@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40392 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40393 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40394 u32 *nvecs, u32 *maxvec);
40395-};
40396+} __no_const;
40397 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40398
40399 struct bfa_iocfc_s {
40400diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40401index 4967643..cbec06b 100644
40402--- a/drivers/scsi/dpt_i2o.c
40403+++ b/drivers/scsi/dpt_i2o.c
40404@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40405 dma_addr_t addr;
40406 ulong flags = 0;
40407
40408+ pax_track_stack();
40409+
40410 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40411 // get user msg size in u32s
40412 if(get_user(size, &user_msg[0])){
40413@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40414 s32 rcode;
40415 dma_addr_t addr;
40416
40417+ pax_track_stack();
40418+
40419 memset(msg, 0 , sizeof(msg));
40420 len = scsi_bufflen(cmd);
40421 direction = 0x00000000;
40422diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40423index c7076ce..e20c67c 100644
40424--- a/drivers/scsi/eata.c
40425+++ b/drivers/scsi/eata.c
40426@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40427 struct hostdata *ha;
40428 char name[16];
40429
40430+ pax_track_stack();
40431+
40432 sprintf(name, "%s%d", driver_name, j);
40433
40434 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40435diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40436index 11ae5c9..891daec 100644
40437--- a/drivers/scsi/fcoe/libfcoe.c
40438+++ b/drivers/scsi/fcoe/libfcoe.c
40439@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40440 size_t rlen;
40441 size_t dlen;
40442
40443+ pax_track_stack();
40444+
40445 fiph = (struct fip_header *)skb->data;
40446 sub = fiph->fip_subcode;
40447 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40448diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40449index 71c7bbe..e93088a 100644
40450--- a/drivers/scsi/fnic/fnic_main.c
40451+++ b/drivers/scsi/fnic/fnic_main.c
40452@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40453 /* Start local port initiatialization */
40454
40455 lp->link_up = 0;
40456- lp->tt = fnic_transport_template;
40457+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40458
40459 lp->max_retry_count = fnic->config.flogi_retries;
40460 lp->max_rport_retry_count = fnic->config.plogi_retries;
40461diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40462index bb96d74..9ec3ce4 100644
40463--- a/drivers/scsi/gdth.c
40464+++ b/drivers/scsi/gdth.c
40465@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40466 ulong flags;
40467 gdth_ha_str *ha;
40468
40469+ pax_track_stack();
40470+
40471 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40472 return -EFAULT;
40473 ha = gdth_find_ha(ldrv.ionode);
40474@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40475 gdth_ha_str *ha;
40476 int rval;
40477
40478+ pax_track_stack();
40479+
40480 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40481 res.number >= MAX_HDRIVES)
40482 return -EFAULT;
40483@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40484 gdth_ha_str *ha;
40485 int rval;
40486
40487+ pax_track_stack();
40488+
40489 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40490 return -EFAULT;
40491 ha = gdth_find_ha(gen.ionode);
40492@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40493 int i;
40494 gdth_cmd_str gdtcmd;
40495 char cmnd[MAX_COMMAND_SIZE];
40496+
40497+ pax_track_stack();
40498+
40499 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40500
40501 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40502diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40503index 1258da3..20d8ae6 100644
40504--- a/drivers/scsi/gdth_proc.c
40505+++ b/drivers/scsi/gdth_proc.c
40506@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40507 ulong64 paddr;
40508
40509 char cmnd[MAX_COMMAND_SIZE];
40510+
40511+ pax_track_stack();
40512+
40513 memset(cmnd, 0xff, 12);
40514 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40515
40516@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40517 gdth_hget_str *phg;
40518 char cmnd[MAX_COMMAND_SIZE];
40519
40520+ pax_track_stack();
40521+
40522 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40523 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40524 if (!gdtcmd || !estr)
40525diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40526index d03a926..f324286 100644
40527--- a/drivers/scsi/hosts.c
40528+++ b/drivers/scsi/hosts.c
40529@@ -40,7 +40,7 @@
40530 #include "scsi_logging.h"
40531
40532
40533-static atomic_t scsi_host_next_hn; /* host_no for next new host */
40534+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40535
40536
40537 static void scsi_host_cls_release(struct device *dev)
40538@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40539 * subtract one because we increment first then return, but we need to
40540 * know what the next host number was before increment
40541 */
40542- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40543+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40544 shost->dma_channel = 0xff;
40545
40546 /* These three are default values which can be overridden */
40547diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40548index a601159..55e19d2 100644
40549--- a/drivers/scsi/ipr.c
40550+++ b/drivers/scsi/ipr.c
40551@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40552 return true;
40553 }
40554
40555-static struct ata_port_operations ipr_sata_ops = {
40556+static const struct ata_port_operations ipr_sata_ops = {
40557 .phy_reset = ipr_ata_phy_reset,
40558 .hardreset = ipr_sata_reset,
40559 .post_internal_cmd = ipr_ata_post_internal,
40560diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40561index 4e49fbc..97907ff 100644
40562--- a/drivers/scsi/ips.h
40563+++ b/drivers/scsi/ips.h
40564@@ -1027,7 +1027,7 @@ typedef struct {
40565 int (*intr)(struct ips_ha *);
40566 void (*enableint)(struct ips_ha *);
40567 uint32_t (*statupd)(struct ips_ha *);
40568-} ips_hw_func_t;
40569+} __no_const ips_hw_func_t;
40570
40571 typedef struct ips_ha {
40572 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40573diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40574index c1c1574..a9c9348 100644
40575--- a/drivers/scsi/libfc/fc_exch.c
40576+++ b/drivers/scsi/libfc/fc_exch.c
40577@@ -86,12 +86,12 @@ struct fc_exch_mgr {
40578 * all together if not used XXX
40579 */
40580 struct {
40581- atomic_t no_free_exch;
40582- atomic_t no_free_exch_xid;
40583- atomic_t xid_not_found;
40584- atomic_t xid_busy;
40585- atomic_t seq_not_found;
40586- atomic_t non_bls_resp;
40587+ atomic_unchecked_t no_free_exch;
40588+ atomic_unchecked_t no_free_exch_xid;
40589+ atomic_unchecked_t xid_not_found;
40590+ atomic_unchecked_t xid_busy;
40591+ atomic_unchecked_t seq_not_found;
40592+ atomic_unchecked_t non_bls_resp;
40593 } stats;
40594 };
40595 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40596@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40597 /* allocate memory for exchange */
40598 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40599 if (!ep) {
40600- atomic_inc(&mp->stats.no_free_exch);
40601+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40602 goto out;
40603 }
40604 memset(ep, 0, sizeof(*ep));
40605@@ -557,7 +557,7 @@ out:
40606 return ep;
40607 err:
40608 spin_unlock_bh(&pool->lock);
40609- atomic_inc(&mp->stats.no_free_exch_xid);
40610+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40611 mempool_free(ep, mp->ep_pool);
40612 return NULL;
40613 }
40614@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40615 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40616 ep = fc_exch_find(mp, xid);
40617 if (!ep) {
40618- atomic_inc(&mp->stats.xid_not_found);
40619+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40620 reject = FC_RJT_OX_ID;
40621 goto out;
40622 }
40623@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40624 ep = fc_exch_find(mp, xid);
40625 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40626 if (ep) {
40627- atomic_inc(&mp->stats.xid_busy);
40628+ atomic_inc_unchecked(&mp->stats.xid_busy);
40629 reject = FC_RJT_RX_ID;
40630 goto rel;
40631 }
40632@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40633 }
40634 xid = ep->xid; /* get our XID */
40635 } else if (!ep) {
40636- atomic_inc(&mp->stats.xid_not_found);
40637+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40638 reject = FC_RJT_RX_ID; /* XID not found */
40639 goto out;
40640 }
40641@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40642 } else {
40643 sp = &ep->seq;
40644 if (sp->id != fh->fh_seq_id) {
40645- atomic_inc(&mp->stats.seq_not_found);
40646+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40647 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
40648 goto rel;
40649 }
40650@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40651
40652 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
40653 if (!ep) {
40654- atomic_inc(&mp->stats.xid_not_found);
40655+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40656 goto out;
40657 }
40658 if (ep->esb_stat & ESB_ST_COMPLETE) {
40659- atomic_inc(&mp->stats.xid_not_found);
40660+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40661 goto out;
40662 }
40663 if (ep->rxid == FC_XID_UNKNOWN)
40664 ep->rxid = ntohs(fh->fh_rx_id);
40665 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
40666- atomic_inc(&mp->stats.xid_not_found);
40667+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40668 goto rel;
40669 }
40670 if (ep->did != ntoh24(fh->fh_s_id) &&
40671 ep->did != FC_FID_FLOGI) {
40672- atomic_inc(&mp->stats.xid_not_found);
40673+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40674 goto rel;
40675 }
40676 sof = fr_sof(fp);
40677@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40678 } else {
40679 sp = &ep->seq;
40680 if (sp->id != fh->fh_seq_id) {
40681- atomic_inc(&mp->stats.seq_not_found);
40682+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40683 goto rel;
40684 }
40685 }
40686@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40687 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
40688
40689 if (!sp)
40690- atomic_inc(&mp->stats.xid_not_found);
40691+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40692 else
40693- atomic_inc(&mp->stats.non_bls_resp);
40694+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
40695
40696 fc_frame_free(fp);
40697 }
40698diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
40699index 0ee989f..a582241 100644
40700--- a/drivers/scsi/libsas/sas_ata.c
40701+++ b/drivers/scsi/libsas/sas_ata.c
40702@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
40703 }
40704 }
40705
40706-static struct ata_port_operations sas_sata_ops = {
40707+static const struct ata_port_operations sas_sata_ops = {
40708 .phy_reset = sas_ata_phy_reset,
40709 .post_internal_cmd = sas_ata_post_internal,
40710 .qc_defer = ata_std_qc_defer,
40711diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
40712index aa10f79..5cc79e4 100644
40713--- a/drivers/scsi/lpfc/lpfc.h
40714+++ b/drivers/scsi/lpfc/lpfc.h
40715@@ -400,7 +400,7 @@ struct lpfc_vport {
40716 struct dentry *debug_nodelist;
40717 struct dentry *vport_debugfs_root;
40718 struct lpfc_debugfs_trc *disc_trc;
40719- atomic_t disc_trc_cnt;
40720+ atomic_unchecked_t disc_trc_cnt;
40721 #endif
40722 uint8_t stat_data_enabled;
40723 uint8_t stat_data_blocked;
40724@@ -725,8 +725,8 @@ struct lpfc_hba {
40725 struct timer_list fabric_block_timer;
40726 unsigned long bit_flags;
40727 #define FABRIC_COMANDS_BLOCKED 0
40728- atomic_t num_rsrc_err;
40729- atomic_t num_cmd_success;
40730+ atomic_unchecked_t num_rsrc_err;
40731+ atomic_unchecked_t num_cmd_success;
40732 unsigned long last_rsrc_error_time;
40733 unsigned long last_ramp_down_time;
40734 unsigned long last_ramp_up_time;
40735@@ -740,7 +740,7 @@ struct lpfc_hba {
40736 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
40737 struct dentry *debug_slow_ring_trc;
40738 struct lpfc_debugfs_trc *slow_ring_trc;
40739- atomic_t slow_ring_trc_cnt;
40740+ atomic_unchecked_t slow_ring_trc_cnt;
40741 #endif
40742
40743 /* Used for deferred freeing of ELS data buffers */
40744diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
40745index 8d0f0de..7c77a62 100644
40746--- a/drivers/scsi/lpfc/lpfc_debugfs.c
40747+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
40748@@ -124,7 +124,7 @@ struct lpfc_debug {
40749 int len;
40750 };
40751
40752-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40753+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40754 static unsigned long lpfc_debugfs_start_time = 0L;
40755
40756 /**
40757@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
40758 lpfc_debugfs_enable = 0;
40759
40760 len = 0;
40761- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
40762+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
40763 (lpfc_debugfs_max_disc_trc - 1);
40764 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
40765 dtp = vport->disc_trc + i;
40766@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
40767 lpfc_debugfs_enable = 0;
40768
40769 len = 0;
40770- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
40771+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
40772 (lpfc_debugfs_max_slow_ring_trc - 1);
40773 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
40774 dtp = phba->slow_ring_trc + i;
40775@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
40776 uint32_t *ptr;
40777 char buffer[1024];
40778
40779+ pax_track_stack();
40780+
40781 off = 0;
40782 spin_lock_irq(&phba->hbalock);
40783
40784@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
40785 !vport || !vport->disc_trc)
40786 return;
40787
40788- index = atomic_inc_return(&vport->disc_trc_cnt) &
40789+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
40790 (lpfc_debugfs_max_disc_trc - 1);
40791 dtp = vport->disc_trc + index;
40792 dtp->fmt = fmt;
40793 dtp->data1 = data1;
40794 dtp->data2 = data2;
40795 dtp->data3 = data3;
40796- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40797+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40798 dtp->jif = jiffies;
40799 #endif
40800 return;
40801@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
40802 !phba || !phba->slow_ring_trc)
40803 return;
40804
40805- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
40806+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
40807 (lpfc_debugfs_max_slow_ring_trc - 1);
40808 dtp = phba->slow_ring_trc + index;
40809 dtp->fmt = fmt;
40810 dtp->data1 = data1;
40811 dtp->data2 = data2;
40812 dtp->data3 = data3;
40813- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40814+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40815 dtp->jif = jiffies;
40816 #endif
40817 return;
40818@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40819 "slow_ring buffer\n");
40820 goto debug_failed;
40821 }
40822- atomic_set(&phba->slow_ring_trc_cnt, 0);
40823+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
40824 memset(phba->slow_ring_trc, 0,
40825 (sizeof(struct lpfc_debugfs_trc) *
40826 lpfc_debugfs_max_slow_ring_trc));
40827@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40828 "buffer\n");
40829 goto debug_failed;
40830 }
40831- atomic_set(&vport->disc_trc_cnt, 0);
40832+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
40833
40834 snprintf(name, sizeof(name), "discovery_trace");
40835 vport->debug_disc_trc =
40836diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
40837index 549bc7d..8189dbb 100644
40838--- a/drivers/scsi/lpfc/lpfc_init.c
40839+++ b/drivers/scsi/lpfc/lpfc_init.c
40840@@ -8021,8 +8021,10 @@ lpfc_init(void)
40841 printk(LPFC_COPYRIGHT "\n");
40842
40843 if (lpfc_enable_npiv) {
40844- lpfc_transport_functions.vport_create = lpfc_vport_create;
40845- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40846+ pax_open_kernel();
40847+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
40848+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40849+ pax_close_kernel();
40850 }
40851 lpfc_transport_template =
40852 fc_attach_transport(&lpfc_transport_functions);
40853diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
40854index c88f59f..ff2a42f 100644
40855--- a/drivers/scsi/lpfc/lpfc_scsi.c
40856+++ b/drivers/scsi/lpfc/lpfc_scsi.c
40857@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
40858 uint32_t evt_posted;
40859
40860 spin_lock_irqsave(&phba->hbalock, flags);
40861- atomic_inc(&phba->num_rsrc_err);
40862+ atomic_inc_unchecked(&phba->num_rsrc_err);
40863 phba->last_rsrc_error_time = jiffies;
40864
40865 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
40866@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
40867 unsigned long flags;
40868 struct lpfc_hba *phba = vport->phba;
40869 uint32_t evt_posted;
40870- atomic_inc(&phba->num_cmd_success);
40871+ atomic_inc_unchecked(&phba->num_cmd_success);
40872
40873 if (vport->cfg_lun_queue_depth <= queue_depth)
40874 return;
40875@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40876 int i;
40877 struct lpfc_rport_data *rdata;
40878
40879- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
40880- num_cmd_success = atomic_read(&phba->num_cmd_success);
40881+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
40882+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
40883
40884 vports = lpfc_create_vport_work_array(phba);
40885 if (vports != NULL)
40886@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40887 }
40888 }
40889 lpfc_destroy_vport_work_array(phba, vports);
40890- atomic_set(&phba->num_rsrc_err, 0);
40891- atomic_set(&phba->num_cmd_success, 0);
40892+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
40893+ atomic_set_unchecked(&phba->num_cmd_success, 0);
40894 }
40895
40896 /**
40897@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
40898 }
40899 }
40900 lpfc_destroy_vport_work_array(phba, vports);
40901- atomic_set(&phba->num_rsrc_err, 0);
40902- atomic_set(&phba->num_cmd_success, 0);
40903+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
40904+ atomic_set_unchecked(&phba->num_cmd_success, 0);
40905 }
40906
40907 /**
40908diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
40909index 234f0b7..3020aea 100644
40910--- a/drivers/scsi/megaraid/megaraid_mbox.c
40911+++ b/drivers/scsi/megaraid/megaraid_mbox.c
40912@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
40913 int rval;
40914 int i;
40915
40916+ pax_track_stack();
40917+
40918 // Allocate memory for the base list of scb for management module.
40919 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
40920
40921diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
40922index 7a117c1..ee01e9e 100644
40923--- a/drivers/scsi/osd/osd_initiator.c
40924+++ b/drivers/scsi/osd/osd_initiator.c
40925@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
40926 int nelem = ARRAY_SIZE(get_attrs), a = 0;
40927 int ret;
40928
40929+ pax_track_stack();
40930+
40931 or = osd_start_request(od, GFP_KERNEL);
40932 if (!or)
40933 return -ENOMEM;
40934diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
40935index 9ab8c86..9425ad3 100644
40936--- a/drivers/scsi/pmcraid.c
40937+++ b/drivers/scsi/pmcraid.c
40938@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
40939 res->scsi_dev = scsi_dev;
40940 scsi_dev->hostdata = res;
40941 res->change_detected = 0;
40942- atomic_set(&res->read_failures, 0);
40943- atomic_set(&res->write_failures, 0);
40944+ atomic_set_unchecked(&res->read_failures, 0);
40945+ atomic_set_unchecked(&res->write_failures, 0);
40946 rc = 0;
40947 }
40948 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
40949@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
40950
40951 /* If this was a SCSI read/write command keep count of errors */
40952 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
40953- atomic_inc(&res->read_failures);
40954+ atomic_inc_unchecked(&res->read_failures);
40955 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
40956- atomic_inc(&res->write_failures);
40957+ atomic_inc_unchecked(&res->write_failures);
40958
40959 if (!RES_IS_GSCSI(res->cfg_entry) &&
40960 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
40961@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
40962
40963 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
40964 /* add resources only after host is added into system */
40965- if (!atomic_read(&pinstance->expose_resources))
40966+ if (!atomic_read_unchecked(&pinstance->expose_resources))
40967 return;
40968
40969 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
40970@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
40971 init_waitqueue_head(&pinstance->reset_wait_q);
40972
40973 atomic_set(&pinstance->outstanding_cmds, 0);
40974- atomic_set(&pinstance->expose_resources, 0);
40975+ atomic_set_unchecked(&pinstance->expose_resources, 0);
40976
40977 INIT_LIST_HEAD(&pinstance->free_res_q);
40978 INIT_LIST_HEAD(&pinstance->used_res_q);
40979@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
40980 /* Schedule worker thread to handle CCN and take care of adding and
40981 * removing devices to OS
40982 */
40983- atomic_set(&pinstance->expose_resources, 1);
40984+ atomic_set_unchecked(&pinstance->expose_resources, 1);
40985 schedule_work(&pinstance->worker_q);
40986 return rc;
40987
40988diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
40989index 3441b3f..6cbe8f7 100644
40990--- a/drivers/scsi/pmcraid.h
40991+++ b/drivers/scsi/pmcraid.h
40992@@ -690,7 +690,7 @@ struct pmcraid_instance {
40993 atomic_t outstanding_cmds;
40994
40995 /* should add/delete resources to mid-layer now ?*/
40996- atomic_t expose_resources;
40997+ atomic_unchecked_t expose_resources;
40998
40999 /* Tasklet to handle deferred processing */
41000 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41001@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41002 struct list_head queue; /* link to "to be exposed" resources */
41003 struct pmcraid_config_table_entry cfg_entry;
41004 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41005- atomic_t read_failures; /* count of failed READ commands */
41006- atomic_t write_failures; /* count of failed WRITE commands */
41007+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41008+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41009
41010 /* To indicate add/delete/modify during CCN */
41011 u8 change_detected;
41012diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41013index 2150618..7034215 100644
41014--- a/drivers/scsi/qla2xxx/qla_def.h
41015+++ b/drivers/scsi/qla2xxx/qla_def.h
41016@@ -2089,7 +2089,7 @@ struct isp_operations {
41017
41018 int (*get_flash_version) (struct scsi_qla_host *, void *);
41019 int (*start_scsi) (srb_t *);
41020-};
41021+} __no_const;
41022
41023 /* MSI-X Support *************************************************************/
41024
41025diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41026index 81b5f29..2ae1fad 100644
41027--- a/drivers/scsi/qla4xxx/ql4_def.h
41028+++ b/drivers/scsi/qla4xxx/ql4_def.h
41029@@ -240,7 +240,7 @@ struct ddb_entry {
41030 atomic_t retry_relogin_timer; /* Min Time between relogins
41031 * (4000 only) */
41032 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41033- atomic_t relogin_retry_count; /* Num of times relogin has been
41034+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41035 * retried */
41036
41037 uint16_t port;
41038diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41039index af8c323..515dd51 100644
41040--- a/drivers/scsi/qla4xxx/ql4_init.c
41041+++ b/drivers/scsi/qla4xxx/ql4_init.c
41042@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41043 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41044 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41045 atomic_set(&ddb_entry->relogin_timer, 0);
41046- atomic_set(&ddb_entry->relogin_retry_count, 0);
41047+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41048 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41049 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41050 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41051@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41052 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41053 atomic_set(&ddb_entry->port_down_timer,
41054 ha->port_down_retry_count);
41055- atomic_set(&ddb_entry->relogin_retry_count, 0);
41056+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41057 atomic_set(&ddb_entry->relogin_timer, 0);
41058 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41059 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41060diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41061index 83c8b5e..a82b348 100644
41062--- a/drivers/scsi/qla4xxx/ql4_os.c
41063+++ b/drivers/scsi/qla4xxx/ql4_os.c
41064@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41065 ddb_entry->fw_ddb_device_state ==
41066 DDB_DS_SESSION_FAILED) {
41067 /* Reset retry relogin timer */
41068- atomic_inc(&ddb_entry->relogin_retry_count);
41069+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41070 DEBUG2(printk("scsi%ld: index[%d] relogin"
41071 " timed out-retrying"
41072 " relogin (%d)\n",
41073 ha->host_no,
41074 ddb_entry->fw_ddb_index,
41075- atomic_read(&ddb_entry->
41076+ atomic_read_unchecked(&ddb_entry->
41077 relogin_retry_count))
41078 );
41079 start_dpc++;
41080diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41081index dd098ca..686ce01 100644
41082--- a/drivers/scsi/scsi.c
41083+++ b/drivers/scsi/scsi.c
41084@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41085 unsigned long timeout;
41086 int rtn = 0;
41087
41088- atomic_inc(&cmd->device->iorequest_cnt);
41089+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41090
41091 /* check if the device is still usable */
41092 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41093diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41094index bc3e363..e1a8e50 100644
41095--- a/drivers/scsi/scsi_debug.c
41096+++ b/drivers/scsi/scsi_debug.c
41097@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41098 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41099 unsigned char *cmd = (unsigned char *)scp->cmnd;
41100
41101+ pax_track_stack();
41102+
41103 if ((errsts = check_readiness(scp, 1, devip)))
41104 return errsts;
41105 memset(arr, 0, sizeof(arr));
41106@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41107 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41108 unsigned char *cmd = (unsigned char *)scp->cmnd;
41109
41110+ pax_track_stack();
41111+
41112 if ((errsts = check_readiness(scp, 1, devip)))
41113 return errsts;
41114 memset(arr, 0, sizeof(arr));
41115diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41116index 1ae7b7c..0a44924 100644
41117--- a/drivers/scsi/scsi_lib.c
41118+++ b/drivers/scsi/scsi_lib.c
41119@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41120
41121 scsi_init_cmd_errh(cmd);
41122 cmd->result = DID_NO_CONNECT << 16;
41123- atomic_inc(&cmd->device->iorequest_cnt);
41124+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41125
41126 /*
41127 * SCSI request completion path will do scsi_device_unbusy(),
41128@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct request *rq)
41129 */
41130 cmd->serial_number = 0;
41131
41132- atomic_inc(&cmd->device->iodone_cnt);
41133+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41134 if (cmd->result)
41135- atomic_inc(&cmd->device->ioerr_cnt);
41136+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41137
41138 disposition = scsi_decide_disposition(cmd);
41139 if (disposition != SUCCESS &&
41140diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41141index 91a93e0..eae0fe3 100644
41142--- a/drivers/scsi/scsi_sysfs.c
41143+++ b/drivers/scsi/scsi_sysfs.c
41144@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41145 char *buf) \
41146 { \
41147 struct scsi_device *sdev = to_scsi_device(dev); \
41148- unsigned long long count = atomic_read(&sdev->field); \
41149+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41150 return snprintf(buf, 20, "0x%llx\n", count); \
41151 } \
41152 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41153diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41154index 1030327..f91fd30 100644
41155--- a/drivers/scsi/scsi_tgt_lib.c
41156+++ b/drivers/scsi/scsi_tgt_lib.c
41157@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41158 int err;
41159
41160 dprintk("%lx %u\n", uaddr, len);
41161- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41162+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41163 if (err) {
41164 /*
41165 * TODO: need to fixup sg_tablesize, max_segment_size,
41166diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41167index db02e31..1b42ea9 100644
41168--- a/drivers/scsi/scsi_transport_fc.c
41169+++ b/drivers/scsi/scsi_transport_fc.c
41170@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41171 * Netlink Infrastructure
41172 */
41173
41174-static atomic_t fc_event_seq;
41175+static atomic_unchecked_t fc_event_seq;
41176
41177 /**
41178 * fc_get_event_number - Obtain the next sequential FC event number
41179@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41180 u32
41181 fc_get_event_number(void)
41182 {
41183- return atomic_add_return(1, &fc_event_seq);
41184+ return atomic_add_return_unchecked(1, &fc_event_seq);
41185 }
41186 EXPORT_SYMBOL(fc_get_event_number);
41187
41188@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41189 {
41190 int error;
41191
41192- atomic_set(&fc_event_seq, 0);
41193+ atomic_set_unchecked(&fc_event_seq, 0);
41194
41195 error = transport_class_register(&fc_host_class);
41196 if (error)
41197diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41198index de2f8c4..63c5278 100644
41199--- a/drivers/scsi/scsi_transport_iscsi.c
41200+++ b/drivers/scsi/scsi_transport_iscsi.c
41201@@ -81,7 +81,7 @@ struct iscsi_internal {
41202 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41203 };
41204
41205-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41206+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41207 static struct workqueue_struct *iscsi_eh_timer_workq;
41208
41209 /*
41210@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41211 int err;
41212
41213 ihost = shost->shost_data;
41214- session->sid = atomic_add_return(1, &iscsi_session_nr);
41215+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41216
41217 if (id == ISCSI_MAX_TARGET) {
41218 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41219@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41220 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41221 ISCSI_TRANSPORT_VERSION);
41222
41223- atomic_set(&iscsi_session_nr, 0);
41224+ atomic_set_unchecked(&iscsi_session_nr, 0);
41225
41226 err = class_register(&iscsi_transport_class);
41227 if (err)
41228diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41229index 21a045e..ec89e03 100644
41230--- a/drivers/scsi/scsi_transport_srp.c
41231+++ b/drivers/scsi/scsi_transport_srp.c
41232@@ -33,7 +33,7 @@
41233 #include "scsi_transport_srp_internal.h"
41234
41235 struct srp_host_attrs {
41236- atomic_t next_port_id;
41237+ atomic_unchecked_t next_port_id;
41238 };
41239 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41240
41241@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41242 struct Scsi_Host *shost = dev_to_shost(dev);
41243 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41244
41245- atomic_set(&srp_host->next_port_id, 0);
41246+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41247 return 0;
41248 }
41249
41250@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41251 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41252 rport->roles = ids->roles;
41253
41254- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41255+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41256 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41257
41258 transport_setup_device(&rport->dev);
41259diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41260index 040f751..98a5ed2 100644
41261--- a/drivers/scsi/sg.c
41262+++ b/drivers/scsi/sg.c
41263@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41264 sdp->disk->disk_name,
41265 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41266 NULL,
41267- (char *)arg);
41268+ (char __user *)arg);
41269 case BLKTRACESTART:
41270 return blk_trace_startstop(sdp->device->request_queue, 1);
41271 case BLKTRACESTOP:
41272@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41273 const struct file_operations * fops;
41274 };
41275
41276-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41277+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41278 {"allow_dio", &adio_fops},
41279 {"debug", &debug_fops},
41280 {"def_reserved_size", &dressz_fops},
41281@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41282 {
41283 int k, mask;
41284 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41285- struct sg_proc_leaf * leaf;
41286+ const struct sg_proc_leaf * leaf;
41287
41288 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41289 if (!sg_proc_sgp)
41290diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41291index 45374d6..61ee484 100644
41292--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41293+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41294@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41295 int do_iounmap = 0;
41296 int do_disable_device = 1;
41297
41298+ pax_track_stack();
41299+
41300 memset(&sym_dev, 0, sizeof(sym_dev));
41301 memset(&nvram, 0, sizeof(nvram));
41302 sym_dev.pdev = pdev;
41303diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41304index eadc1ab..2d81457 100644
41305--- a/drivers/serial/kgdboc.c
41306+++ b/drivers/serial/kgdboc.c
41307@@ -18,7 +18,7 @@
41308
41309 #define MAX_CONFIG_LEN 40
41310
41311-static struct kgdb_io kgdboc_io_ops;
41312+static const struct kgdb_io kgdboc_io_ops;
41313
41314 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41315 static int configured = -1;
41316@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41317 module_put(THIS_MODULE);
41318 }
41319
41320-static struct kgdb_io kgdboc_io_ops = {
41321+static const struct kgdb_io kgdboc_io_ops = {
41322 .name = "kgdboc",
41323 .read_char = kgdboc_get_char,
41324 .write_char = kgdboc_put_char,
41325diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41326index b76f246..7f41af7 100644
41327--- a/drivers/spi/spi.c
41328+++ b/drivers/spi/spi.c
41329@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41330 EXPORT_SYMBOL_GPL(spi_sync);
41331
41332 /* portable code must never pass more than 32 bytes */
41333-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41334+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41335
41336 static u8 *buf;
41337
41338diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41339index 99010d4..6bad87b 100644
41340--- a/drivers/staging/android/binder.c
41341+++ b/drivers/staging/android/binder.c
41342@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41343 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41344 }
41345
41346-static struct vm_operations_struct binder_vm_ops = {
41347+static const struct vm_operations_struct binder_vm_ops = {
41348 .open = binder_vma_open,
41349 .close = binder_vma_close,
41350 };
41351diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41352index cda26bb..39fed3f 100644
41353--- a/drivers/staging/b3dfg/b3dfg.c
41354+++ b/drivers/staging/b3dfg/b3dfg.c
41355@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41356 return VM_FAULT_NOPAGE;
41357 }
41358
41359-static struct vm_operations_struct b3dfg_vm_ops = {
41360+static const struct vm_operations_struct b3dfg_vm_ops = {
41361 .fault = b3dfg_vma_fault,
41362 };
41363
41364@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41365 return r;
41366 }
41367
41368-static struct file_operations b3dfg_fops = {
41369+static const struct file_operations b3dfg_fops = {
41370 .owner = THIS_MODULE,
41371 .open = b3dfg_open,
41372 .release = b3dfg_release,
41373diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41374index 80a1071..8c14e17 100644
41375--- a/drivers/staging/comedi/comedi_fops.c
41376+++ b/drivers/staging/comedi/comedi_fops.c
41377@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41378 mutex_unlock(&dev->mutex);
41379 }
41380
41381-static struct vm_operations_struct comedi_vm_ops = {
41382+static const struct vm_operations_struct comedi_vm_ops = {
41383 .close = comedi_unmap,
41384 };
41385
41386diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41387index e55a0db..577b776 100644
41388--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41389+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41390@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41391 static dev_t adsp_devno;
41392 static struct class *adsp_class;
41393
41394-static struct file_operations adsp_fops = {
41395+static const struct file_operations adsp_fops = {
41396 .owner = THIS_MODULE,
41397 .open = adsp_open,
41398 .unlocked_ioctl = adsp_ioctl,
41399diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41400index ad2390f..4116ee8 100644
41401--- a/drivers/staging/dream/qdsp5/audio_aac.c
41402+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41403@@ -1022,7 +1022,7 @@ done:
41404 return rc;
41405 }
41406
41407-static struct file_operations audio_aac_fops = {
41408+static const struct file_operations audio_aac_fops = {
41409 .owner = THIS_MODULE,
41410 .open = audio_open,
41411 .release = audio_release,
41412diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41413index cd818a5..870b37b 100644
41414--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41415+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41416@@ -833,7 +833,7 @@ done:
41417 return rc;
41418 }
41419
41420-static struct file_operations audio_amrnb_fops = {
41421+static const struct file_operations audio_amrnb_fops = {
41422 .owner = THIS_MODULE,
41423 .open = audamrnb_open,
41424 .release = audamrnb_release,
41425diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41426index 4b43e18..cedafda 100644
41427--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41428+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41429@@ -805,7 +805,7 @@ dma_fail:
41430 return rc;
41431 }
41432
41433-static struct file_operations audio_evrc_fops = {
41434+static const struct file_operations audio_evrc_fops = {
41435 .owner = THIS_MODULE,
41436 .open = audevrc_open,
41437 .release = audevrc_release,
41438diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41439index 3d950a2..9431118 100644
41440--- a/drivers/staging/dream/qdsp5/audio_in.c
41441+++ b/drivers/staging/dream/qdsp5/audio_in.c
41442@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41443 return 0;
41444 }
41445
41446-static struct file_operations audio_fops = {
41447+static const struct file_operations audio_fops = {
41448 .owner = THIS_MODULE,
41449 .open = audio_in_open,
41450 .release = audio_in_release,
41451@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41452 .unlocked_ioctl = audio_in_ioctl,
41453 };
41454
41455-static struct file_operations audpre_fops = {
41456+static const struct file_operations audpre_fops = {
41457 .owner = THIS_MODULE,
41458 .open = audpre_open,
41459 .unlocked_ioctl = audpre_ioctl,
41460diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41461index b95574f..286c2f4 100644
41462--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41463+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41464@@ -941,7 +941,7 @@ done:
41465 return rc;
41466 }
41467
41468-static struct file_operations audio_mp3_fops = {
41469+static const struct file_operations audio_mp3_fops = {
41470 .owner = THIS_MODULE,
41471 .open = audio_open,
41472 .release = audio_release,
41473diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41474index d1adcf6..f8f9833 100644
41475--- a/drivers/staging/dream/qdsp5/audio_out.c
41476+++ b/drivers/staging/dream/qdsp5/audio_out.c
41477@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41478 return 0;
41479 }
41480
41481-static struct file_operations audio_fops = {
41482+static const struct file_operations audio_fops = {
41483 .owner = THIS_MODULE,
41484 .open = audio_open,
41485 .release = audio_release,
41486@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41487 .unlocked_ioctl = audio_ioctl,
41488 };
41489
41490-static struct file_operations audpp_fops = {
41491+static const struct file_operations audpp_fops = {
41492 .owner = THIS_MODULE,
41493 .open = audpp_open,
41494 .unlocked_ioctl = audpp_ioctl,
41495diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41496index f0f50e3..f6b9dbc 100644
41497--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41498+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41499@@ -816,7 +816,7 @@ err:
41500 return rc;
41501 }
41502
41503-static struct file_operations audio_qcelp_fops = {
41504+static const struct file_operations audio_qcelp_fops = {
41505 .owner = THIS_MODULE,
41506 .open = audqcelp_open,
41507 .release = audqcelp_release,
41508diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41509index 037d7ff..5469ec3 100644
41510--- a/drivers/staging/dream/qdsp5/snd.c
41511+++ b/drivers/staging/dream/qdsp5/snd.c
41512@@ -242,7 +242,7 @@ err:
41513 return rc;
41514 }
41515
41516-static struct file_operations snd_fops = {
41517+static const struct file_operations snd_fops = {
41518 .owner = THIS_MODULE,
41519 .open = snd_open,
41520 .release = snd_release,
41521diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41522index d4e7d88..0ea632a 100644
41523--- a/drivers/staging/dream/smd/smd_qmi.c
41524+++ b/drivers/staging/dream/smd/smd_qmi.c
41525@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41526 return 0;
41527 }
41528
41529-static struct file_operations qmi_fops = {
41530+static const struct file_operations qmi_fops = {
41531 .owner = THIS_MODULE,
41532 .read = qmi_read,
41533 .write = qmi_write,
41534diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41535index cd3910b..ff053d3 100644
41536--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41537+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41538@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41539 return rc;
41540 }
41541
41542-static struct file_operations rpcrouter_server_fops = {
41543+static const struct file_operations rpcrouter_server_fops = {
41544 .owner = THIS_MODULE,
41545 .open = rpcrouter_open,
41546 .release = rpcrouter_release,
41547@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41548 .unlocked_ioctl = rpcrouter_ioctl,
41549 };
41550
41551-static struct file_operations rpcrouter_router_fops = {
41552+static const struct file_operations rpcrouter_router_fops = {
41553 .owner = THIS_MODULE,
41554 .open = rpcrouter_open,
41555 .release = rpcrouter_release,
41556diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41557index c24e4e0..07665be 100644
41558--- a/drivers/staging/dst/dcore.c
41559+++ b/drivers/staging/dst/dcore.c
41560@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41561 return 0;
41562 }
41563
41564-static struct block_device_operations dst_blk_ops = {
41565+static const struct block_device_operations dst_blk_ops = {
41566 .open = dst_bdev_open,
41567 .release = dst_bdev_release,
41568 .owner = THIS_MODULE,
41569@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41570 n->size = ctl->size;
41571
41572 atomic_set(&n->refcnt, 1);
41573- atomic_long_set(&n->gen, 0);
41574+ atomic_long_set_unchecked(&n->gen, 0);
41575 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41576
41577 err = dst_node_sysfs_init(n);
41578diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41579index 557d372..8d84422 100644
41580--- a/drivers/staging/dst/trans.c
41581+++ b/drivers/staging/dst/trans.c
41582@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41583 t->error = 0;
41584 t->retries = 0;
41585 atomic_set(&t->refcnt, 1);
41586- t->gen = atomic_long_inc_return(&n->gen);
41587+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
41588
41589 t->enc = bio_data_dir(bio);
41590 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41591diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41592index 94f7752..d051514 100644
41593--- a/drivers/staging/et131x/et1310_tx.c
41594+++ b/drivers/staging/et131x/et1310_tx.c
41595@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41596 struct net_device_stats *stats = &etdev->net_stats;
41597
41598 if (pMpTcb->Flags & fMP_DEST_BROAD)
41599- atomic_inc(&etdev->Stats.brdcstxmt);
41600+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
41601 else if (pMpTcb->Flags & fMP_DEST_MULTI)
41602- atomic_inc(&etdev->Stats.multixmt);
41603+ atomic_inc_unchecked(&etdev->Stats.multixmt);
41604 else
41605- atomic_inc(&etdev->Stats.unixmt);
41606+ atomic_inc_unchecked(&etdev->Stats.unixmt);
41607
41608 if (pMpTcb->Packet) {
41609 stats->tx_bytes += pMpTcb->Packet->len;
41610diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
41611index 1dfe06f..f469b4d 100644
41612--- a/drivers/staging/et131x/et131x_adapter.h
41613+++ b/drivers/staging/et131x/et131x_adapter.h
41614@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
41615 * operations
41616 */
41617 u32 unircv; /* # multicast packets received */
41618- atomic_t unixmt; /* # multicast packets for Tx */
41619+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
41620 u32 multircv; /* # multicast packets received */
41621- atomic_t multixmt; /* # multicast packets for Tx */
41622+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
41623 u32 brdcstrcv; /* # broadcast packets received */
41624- atomic_t brdcstxmt; /* # broadcast packets for Tx */
41625+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
41626 u32 norcvbuf; /* # Rx packets discarded */
41627 u32 noxmtbuf; /* # Tx packets discarded */
41628
41629diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
41630index 4bd353a..e28f455 100644
41631--- a/drivers/staging/go7007/go7007-v4l2.c
41632+++ b/drivers/staging/go7007/go7007-v4l2.c
41633@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41634 return 0;
41635 }
41636
41637-static struct vm_operations_struct go7007_vm_ops = {
41638+static const struct vm_operations_struct go7007_vm_ops = {
41639 .open = go7007_vm_open,
41640 .close = go7007_vm_close,
41641 .fault = go7007_vm_fault,
41642diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
41643index 366dc95..b974d87 100644
41644--- a/drivers/staging/hv/Channel.c
41645+++ b/drivers/staging/hv/Channel.c
41646@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
41647
41648 DPRINT_ENTER(VMBUS);
41649
41650- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
41651- atomic_inc(&gVmbusConnection.NextGpadlHandle);
41652+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
41653+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
41654
41655 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
41656 ASSERT(msgInfo != NULL);
41657diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
41658index b12237f..01ae28a 100644
41659--- a/drivers/staging/hv/Hv.c
41660+++ b/drivers/staging/hv/Hv.c
41661@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
41662 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
41663 u32 outputAddressHi = outputAddress >> 32;
41664 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
41665- volatile void *hypercallPage = gHvContext.HypercallPage;
41666+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
41667
41668 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
41669 Control, Input, Output);
41670diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
41671index d089bb1..2ebc158 100644
41672--- a/drivers/staging/hv/VmbusApi.h
41673+++ b/drivers/staging/hv/VmbusApi.h
41674@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
41675 u32 *GpadlHandle);
41676 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
41677 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
41678-};
41679+} __no_const;
41680
41681 /* Base driver object */
41682 struct hv_driver {
41683diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
41684index 5a37cce..6ecc88c 100644
41685--- a/drivers/staging/hv/VmbusPrivate.h
41686+++ b/drivers/staging/hv/VmbusPrivate.h
41687@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
41688 struct VMBUS_CONNECTION {
41689 enum VMBUS_CONNECT_STATE ConnectState;
41690
41691- atomic_t NextGpadlHandle;
41692+ atomic_unchecked_t NextGpadlHandle;
41693
41694 /*
41695 * Represents channel interrupts. Each bit position represents a
41696diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
41697index 871a202..ca50ddf 100644
41698--- a/drivers/staging/hv/blkvsc_drv.c
41699+++ b/drivers/staging/hv/blkvsc_drv.c
41700@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
41701 /* The one and only one */
41702 static struct blkvsc_driver_context g_blkvsc_drv;
41703
41704-static struct block_device_operations block_ops = {
41705+static const struct block_device_operations block_ops = {
41706 .owner = THIS_MODULE,
41707 .open = blkvsc_open,
41708 .release = blkvsc_release,
41709diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
41710index 6acc49a..fbc8d46 100644
41711--- a/drivers/staging/hv/vmbus_drv.c
41712+++ b/drivers/staging/hv/vmbus_drv.c
41713@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41714 to_device_context(root_device_obj);
41715 struct device_context *child_device_ctx =
41716 to_device_context(child_device_obj);
41717- static atomic_t device_num = ATOMIC_INIT(0);
41718+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41719
41720 DPRINT_ENTER(VMBUS_DRV);
41721
41722@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41723
41724 /* Set the device name. Otherwise, device_register() will fail. */
41725 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
41726- atomic_inc_return(&device_num));
41727+ atomic_inc_return_unchecked(&device_num));
41728
41729 /* The new device belongs to this bus */
41730 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
41731diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
41732index d926189..17b19fd 100644
41733--- a/drivers/staging/iio/ring_generic.h
41734+++ b/drivers/staging/iio/ring_generic.h
41735@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
41736
41737 int (*is_enabled)(struct iio_ring_buffer *ring);
41738 int (*enable)(struct iio_ring_buffer *ring);
41739-};
41740+} __no_const;
41741
41742 /**
41743 * struct iio_ring_buffer - general ring buffer structure
41744diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41745index 1b237b7..88c624e 100644
41746--- a/drivers/staging/octeon/ethernet-rx.c
41747+++ b/drivers/staging/octeon/ethernet-rx.c
41748@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41749 /* Increment RX stats for virtual ports */
41750 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41751 #ifdef CONFIG_64BIT
41752- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41753- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41754+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41755+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41756 #else
41757- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41758- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41759+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41760+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
41761 #endif
41762 }
41763 netif_receive_skb(skb);
41764@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41765 dev->name);
41766 */
41767 #ifdef CONFIG_64BIT
41768- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
41769+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
41770 #else
41771- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
41772+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
41773 #endif
41774 dev_kfree_skb_irq(skb);
41775 }
41776diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
41777index 492c502..d9909f1 100644
41778--- a/drivers/staging/octeon/ethernet.c
41779+++ b/drivers/staging/octeon/ethernet.c
41780@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
41781 * since the RX tasklet also increments it.
41782 */
41783 #ifdef CONFIG_64BIT
41784- atomic64_add(rx_status.dropped_packets,
41785- (atomic64_t *)&priv->stats.rx_dropped);
41786+ atomic64_add_unchecked(rx_status.dropped_packets,
41787+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41788 #else
41789- atomic_add(rx_status.dropped_packets,
41790- (atomic_t *)&priv->stats.rx_dropped);
41791+ atomic_add_unchecked(rx_status.dropped_packets,
41792+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
41793 #endif
41794 }
41795
41796diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
41797index a35bd5d..28fff45 100644
41798--- a/drivers/staging/otus/80211core/pub_zfi.h
41799+++ b/drivers/staging/otus/80211core/pub_zfi.h
41800@@ -531,7 +531,7 @@ struct zsCbFuncTbl
41801 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
41802
41803 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
41804-};
41805+} __no_const;
41806
41807 extern void zfZeroMemory(u8_t* va, u16_t length);
41808 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
41809diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
41810index c39a25f..696f5aa 100644
41811--- a/drivers/staging/panel/panel.c
41812+++ b/drivers/staging/panel/panel.c
41813@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
41814 return 0;
41815 }
41816
41817-static struct file_operations lcd_fops = {
41818+static const struct file_operations lcd_fops = {
41819 .write = lcd_write,
41820 .open = lcd_open,
41821 .release = lcd_release,
41822@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
41823 return 0;
41824 }
41825
41826-static struct file_operations keypad_fops = {
41827+static const struct file_operations keypad_fops = {
41828 .read = keypad_read, /* read */
41829 .open = keypad_open, /* open */
41830 .release = keypad_release, /* close */
41831diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
41832index 270ebcb..37e46af 100644
41833--- a/drivers/staging/phison/phison.c
41834+++ b/drivers/staging/phison/phison.c
41835@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
41836 ATA_BMDMA_SHT(DRV_NAME),
41837 };
41838
41839-static struct ata_port_operations phison_ops = {
41840+static const struct ata_port_operations phison_ops = {
41841 .inherits = &ata_bmdma_port_ops,
41842 .prereset = phison_pre_reset,
41843 };
41844diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
41845index 2eb8e3d..57616a7 100644
41846--- a/drivers/staging/poch/poch.c
41847+++ b/drivers/staging/poch/poch.c
41848@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
41849 return 0;
41850 }
41851
41852-static struct file_operations poch_fops = {
41853+static const struct file_operations poch_fops = {
41854 .owner = THIS_MODULE,
41855 .open = poch_open,
41856 .release = poch_release,
41857diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
41858index c94de31..19402bc 100644
41859--- a/drivers/staging/pohmelfs/inode.c
41860+++ b/drivers/staging/pohmelfs/inode.c
41861@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41862 mutex_init(&psb->mcache_lock);
41863 psb->mcache_root = RB_ROOT;
41864 psb->mcache_timeout = msecs_to_jiffies(5000);
41865- atomic_long_set(&psb->mcache_gen, 0);
41866+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
41867
41868 psb->trans_max_pages = 100;
41869
41870@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41871 INIT_LIST_HEAD(&psb->crypto_ready_list);
41872 INIT_LIST_HEAD(&psb->crypto_active_list);
41873
41874- atomic_set(&psb->trans_gen, 1);
41875+ atomic_set_unchecked(&psb->trans_gen, 1);
41876 atomic_long_set(&psb->total_inodes, 0);
41877
41878 mutex_init(&psb->state_lock);
41879diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
41880index e22665c..a2a9390 100644
41881--- a/drivers/staging/pohmelfs/mcache.c
41882+++ b/drivers/staging/pohmelfs/mcache.c
41883@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
41884 m->data = data;
41885 m->start = start;
41886 m->size = size;
41887- m->gen = atomic_long_inc_return(&psb->mcache_gen);
41888+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
41889
41890 mutex_lock(&psb->mcache_lock);
41891 err = pohmelfs_mcache_insert(psb, m);
41892diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
41893index 623a07d..4035c19 100644
41894--- a/drivers/staging/pohmelfs/netfs.h
41895+++ b/drivers/staging/pohmelfs/netfs.h
41896@@ -570,14 +570,14 @@ struct pohmelfs_config;
41897 struct pohmelfs_sb {
41898 struct rb_root mcache_root;
41899 struct mutex mcache_lock;
41900- atomic_long_t mcache_gen;
41901+ atomic_long_unchecked_t mcache_gen;
41902 unsigned long mcache_timeout;
41903
41904 unsigned int idx;
41905
41906 unsigned int trans_retries;
41907
41908- atomic_t trans_gen;
41909+ atomic_unchecked_t trans_gen;
41910
41911 unsigned int crypto_attached_size;
41912 unsigned int crypto_align_size;
41913diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
41914index 36a2535..0591bf4 100644
41915--- a/drivers/staging/pohmelfs/trans.c
41916+++ b/drivers/staging/pohmelfs/trans.c
41917@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
41918 int err;
41919 struct netfs_cmd *cmd = t->iovec.iov_base;
41920
41921- t->gen = atomic_inc_return(&psb->trans_gen);
41922+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
41923
41924 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
41925 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
41926diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
41927index f890a16..509ece8 100644
41928--- a/drivers/staging/sep/sep_driver.c
41929+++ b/drivers/staging/sep/sep_driver.c
41930@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
41931 static dev_t sep_devno;
41932
41933 /* the files operations structure of the driver */
41934-static struct file_operations sep_file_operations = {
41935+static const struct file_operations sep_file_operations = {
41936 .owner = THIS_MODULE,
41937 .ioctl = sep_ioctl,
41938 .poll = sep_poll,
41939diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
41940index 5e16bc3..7655b10 100644
41941--- a/drivers/staging/usbip/usbip_common.h
41942+++ b/drivers/staging/usbip/usbip_common.h
41943@@ -374,7 +374,7 @@ struct usbip_device {
41944 void (*shutdown)(struct usbip_device *);
41945 void (*reset)(struct usbip_device *);
41946 void (*unusable)(struct usbip_device *);
41947- } eh_ops;
41948+ } __no_const eh_ops;
41949 };
41950
41951
41952diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
41953index 57f7946..d9df23d 100644
41954--- a/drivers/staging/usbip/vhci.h
41955+++ b/drivers/staging/usbip/vhci.h
41956@@ -92,7 +92,7 @@ struct vhci_hcd {
41957 unsigned resuming:1;
41958 unsigned long re_timeout;
41959
41960- atomic_t seqnum;
41961+ atomic_unchecked_t seqnum;
41962
41963 /*
41964 * NOTE:
41965diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
41966index 20cd7db..c2693ff 100644
41967--- a/drivers/staging/usbip/vhci_hcd.c
41968+++ b/drivers/staging/usbip/vhci_hcd.c
41969@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
41970 return;
41971 }
41972
41973- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
41974+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
41975 if (priv->seqnum == 0xffff)
41976 usbip_uinfo("seqnum max\n");
41977
41978@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
41979 return -ENOMEM;
41980 }
41981
41982- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
41983+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
41984 if (unlink->seqnum == 0xffff)
41985 usbip_uinfo("seqnum max\n");
41986
41987@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
41988 vdev->rhport = rhport;
41989 }
41990
41991- atomic_set(&vhci->seqnum, 0);
41992+ atomic_set_unchecked(&vhci->seqnum, 0);
41993 spin_lock_init(&vhci->lock);
41994
41995
41996diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
41997index 8ed5206..92469e3 100644
41998--- a/drivers/staging/usbip/vhci_rx.c
41999+++ b/drivers/staging/usbip/vhci_rx.c
42000@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42001 usbip_uerr("cannot find a urb of seqnum %u\n",
42002 pdu->base.seqnum);
42003 usbip_uinfo("max seqnum %d\n",
42004- atomic_read(&the_controller->seqnum));
42005+ atomic_read_unchecked(&the_controller->seqnum));
42006 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42007 return;
42008 }
42009diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42010index 7891288..8e31300 100644
42011--- a/drivers/staging/vme/devices/vme_user.c
42012+++ b/drivers/staging/vme/devices/vme_user.c
42013@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42014 static int __init vme_user_probe(struct device *, int, int);
42015 static int __exit vme_user_remove(struct device *, int, int);
42016
42017-static struct file_operations vme_user_fops = {
42018+static const struct file_operations vme_user_fops = {
42019 .open = vme_user_open,
42020 .release = vme_user_release,
42021 .read = vme_user_read,
42022diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42023index 58abf44..00c1fc8 100644
42024--- a/drivers/staging/vt6655/hostap.c
42025+++ b/drivers/staging/vt6655/hostap.c
42026@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42027 PSDevice apdev_priv;
42028 struct net_device *dev = pDevice->dev;
42029 int ret;
42030- const struct net_device_ops apdev_netdev_ops = {
42031+ net_device_ops_no_const apdev_netdev_ops = {
42032 .ndo_start_xmit = pDevice->tx_80211,
42033 };
42034
42035diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42036index 0c8267a..db1f363 100644
42037--- a/drivers/staging/vt6656/hostap.c
42038+++ b/drivers/staging/vt6656/hostap.c
42039@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42040 PSDevice apdev_priv;
42041 struct net_device *dev = pDevice->dev;
42042 int ret;
42043- const struct net_device_ops apdev_netdev_ops = {
42044+ net_device_ops_no_const apdev_netdev_ops = {
42045 .ndo_start_xmit = pDevice->tx_80211,
42046 };
42047
42048diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42049index 925678b..da7f5ed 100644
42050--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42051+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42052@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42053
42054 struct usbctlx_completor {
42055 int (*complete) (struct usbctlx_completor *);
42056-};
42057+} __no_const;
42058 typedef struct usbctlx_completor usbctlx_completor_t;
42059
42060 static int
42061diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42062index 40de151..924f268 100644
42063--- a/drivers/telephony/ixj.c
42064+++ b/drivers/telephony/ixj.c
42065@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42066 bool mContinue;
42067 char *pIn, *pOut;
42068
42069+ pax_track_stack();
42070+
42071 if (!SCI_Prepare(j))
42072 return 0;
42073
42074diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42075index e941367..b631f5a 100644
42076--- a/drivers/uio/uio.c
42077+++ b/drivers/uio/uio.c
42078@@ -23,6 +23,7 @@
42079 #include <linux/string.h>
42080 #include <linux/kobject.h>
42081 #include <linux/uio_driver.h>
42082+#include <asm/local.h>
42083
42084 #define UIO_MAX_DEVICES 255
42085
42086@@ -30,10 +31,10 @@ struct uio_device {
42087 struct module *owner;
42088 struct device *dev;
42089 int minor;
42090- atomic_t event;
42091+ atomic_unchecked_t event;
42092 struct fasync_struct *async_queue;
42093 wait_queue_head_t wait;
42094- int vma_count;
42095+ local_t vma_count;
42096 struct uio_info *info;
42097 struct kobject *map_dir;
42098 struct kobject *portio_dir;
42099@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42100 return entry->show(mem, buf);
42101 }
42102
42103-static struct sysfs_ops map_sysfs_ops = {
42104+static const struct sysfs_ops map_sysfs_ops = {
42105 .show = map_type_show,
42106 };
42107
42108@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42109 return entry->show(port, buf);
42110 }
42111
42112-static struct sysfs_ops portio_sysfs_ops = {
42113+static const struct sysfs_ops portio_sysfs_ops = {
42114 .show = portio_type_show,
42115 };
42116
42117@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42118 struct uio_device *idev = dev_get_drvdata(dev);
42119 if (idev)
42120 return sprintf(buf, "%u\n",
42121- (unsigned int)atomic_read(&idev->event));
42122+ (unsigned int)atomic_read_unchecked(&idev->event));
42123 else
42124 return -ENODEV;
42125 }
42126@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42127 {
42128 struct uio_device *idev = info->uio_dev;
42129
42130- atomic_inc(&idev->event);
42131+ atomic_inc_unchecked(&idev->event);
42132 wake_up_interruptible(&idev->wait);
42133 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42134 }
42135@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42136 }
42137
42138 listener->dev = idev;
42139- listener->event_count = atomic_read(&idev->event);
42140+ listener->event_count = atomic_read_unchecked(&idev->event);
42141 filep->private_data = listener;
42142
42143 if (idev->info->open) {
42144@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42145 return -EIO;
42146
42147 poll_wait(filep, &idev->wait, wait);
42148- if (listener->event_count != atomic_read(&idev->event))
42149+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42150 return POLLIN | POLLRDNORM;
42151 return 0;
42152 }
42153@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42154 do {
42155 set_current_state(TASK_INTERRUPTIBLE);
42156
42157- event_count = atomic_read(&idev->event);
42158+ event_count = atomic_read_unchecked(&idev->event);
42159 if (event_count != listener->event_count) {
42160 if (copy_to_user(buf, &event_count, count))
42161 retval = -EFAULT;
42162@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42163 static void uio_vma_open(struct vm_area_struct *vma)
42164 {
42165 struct uio_device *idev = vma->vm_private_data;
42166- idev->vma_count++;
42167+ local_inc(&idev->vma_count);
42168 }
42169
42170 static void uio_vma_close(struct vm_area_struct *vma)
42171 {
42172 struct uio_device *idev = vma->vm_private_data;
42173- idev->vma_count--;
42174+ local_dec(&idev->vma_count);
42175 }
42176
42177 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42178@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42179 idev->owner = owner;
42180 idev->info = info;
42181 init_waitqueue_head(&idev->wait);
42182- atomic_set(&idev->event, 0);
42183+ atomic_set_unchecked(&idev->event, 0);
42184
42185 ret = uio_get_minor(idev);
42186 if (ret)
42187diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42188index fbea856..06efea6 100644
42189--- a/drivers/usb/atm/usbatm.c
42190+++ b/drivers/usb/atm/usbatm.c
42191@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42192 if (printk_ratelimit())
42193 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42194 __func__, vpi, vci);
42195- atomic_inc(&vcc->stats->rx_err);
42196+ atomic_inc_unchecked(&vcc->stats->rx_err);
42197 return;
42198 }
42199
42200@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42201 if (length > ATM_MAX_AAL5_PDU) {
42202 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42203 __func__, length, vcc);
42204- atomic_inc(&vcc->stats->rx_err);
42205+ atomic_inc_unchecked(&vcc->stats->rx_err);
42206 goto out;
42207 }
42208
42209@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42210 if (sarb->len < pdu_length) {
42211 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42212 __func__, pdu_length, sarb->len, vcc);
42213- atomic_inc(&vcc->stats->rx_err);
42214+ atomic_inc_unchecked(&vcc->stats->rx_err);
42215 goto out;
42216 }
42217
42218 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42219 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42220 __func__, vcc);
42221- atomic_inc(&vcc->stats->rx_err);
42222+ atomic_inc_unchecked(&vcc->stats->rx_err);
42223 goto out;
42224 }
42225
42226@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42227 if (printk_ratelimit())
42228 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42229 __func__, length);
42230- atomic_inc(&vcc->stats->rx_drop);
42231+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42232 goto out;
42233 }
42234
42235@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42236
42237 vcc->push(vcc, skb);
42238
42239- atomic_inc(&vcc->stats->rx);
42240+ atomic_inc_unchecked(&vcc->stats->rx);
42241 out:
42242 skb_trim(sarb, 0);
42243 }
42244@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42245 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42246
42247 usbatm_pop(vcc, skb);
42248- atomic_inc(&vcc->stats->tx);
42249+ atomic_inc_unchecked(&vcc->stats->tx);
42250
42251 skb = skb_dequeue(&instance->sndqueue);
42252 }
42253@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42254 if (!left--)
42255 return sprintf(page,
42256 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42257- atomic_read(&atm_dev->stats.aal5.tx),
42258- atomic_read(&atm_dev->stats.aal5.tx_err),
42259- atomic_read(&atm_dev->stats.aal5.rx),
42260- atomic_read(&atm_dev->stats.aal5.rx_err),
42261- atomic_read(&atm_dev->stats.aal5.rx_drop));
42262+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42263+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42264+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42265+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42266+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42267
42268 if (!left--) {
42269 if (instance->disconnected)
42270diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
42271index 3e564bf..949b448 100644
42272--- a/drivers/usb/class/cdc-wdm.c
42273+++ b/drivers/usb/class/cdc-wdm.c
42274@@ -314,7 +314,7 @@ static ssize_t wdm_write
42275 if (r < 0)
42276 goto outnp;
42277
42278- if (!file->f_flags && O_NONBLOCK)
42279+ if (!(file->f_flags & O_NONBLOCK))
42280 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
42281 &desc->flags));
42282 else
42283diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42284index 24e6205..fe5a5d4 100644
42285--- a/drivers/usb/core/hcd.c
42286+++ b/drivers/usb/core/hcd.c
42287@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42288
42289 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42290
42291-struct usb_mon_operations *mon_ops;
42292+const struct usb_mon_operations *mon_ops;
42293
42294 /*
42295 * The registration is unlocked.
42296@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42297 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42298 */
42299
42300-int usb_mon_register (struct usb_mon_operations *ops)
42301+int usb_mon_register (const struct usb_mon_operations *ops)
42302 {
42303
42304 if (mon_ops)
42305diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42306index bcbe104..9cfd1c6 100644
42307--- a/drivers/usb/core/hcd.h
42308+++ b/drivers/usb/core/hcd.h
42309@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42310 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42311
42312 struct usb_mon_operations {
42313- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42314- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42315- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42316+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42317+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42318+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42319 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42320 };
42321
42322-extern struct usb_mon_operations *mon_ops;
42323+extern const struct usb_mon_operations *mon_ops;
42324
42325 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42326 {
42327@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42328 (*mon_ops->urb_complete)(bus, urb, status);
42329 }
42330
42331-int usb_mon_register(struct usb_mon_operations *ops);
42332+int usb_mon_register(const struct usb_mon_operations *ops);
42333 void usb_mon_deregister(void);
42334
42335 #else
42336diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42337index 409cc94..a673bad 100644
42338--- a/drivers/usb/core/message.c
42339+++ b/drivers/usb/core/message.c
42340@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42341 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42342 if (buf) {
42343 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42344- if (len > 0) {
42345- smallbuf = kmalloc(++len, GFP_NOIO);
42346+ if (len++ > 0) {
42347+ smallbuf = kmalloc(len, GFP_NOIO);
42348 if (!smallbuf)
42349 return buf;
42350 memcpy(smallbuf, buf, len);
42351diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42352index 62ff5e7..530b74e 100644
42353--- a/drivers/usb/misc/appledisplay.c
42354+++ b/drivers/usb/misc/appledisplay.c
42355@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42356 return pdata->msgdata[1];
42357 }
42358
42359-static struct backlight_ops appledisplay_bl_data = {
42360+static const struct backlight_ops appledisplay_bl_data = {
42361 .get_brightness = appledisplay_bl_get_brightness,
42362 .update_status = appledisplay_bl_update_status,
42363 };
42364diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42365index e0c2db3..bd8cb66 100644
42366--- a/drivers/usb/mon/mon_main.c
42367+++ b/drivers/usb/mon/mon_main.c
42368@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42369 /*
42370 * Ops
42371 */
42372-static struct usb_mon_operations mon_ops_0 = {
42373+static const struct usb_mon_operations mon_ops_0 = {
42374 .urb_submit = mon_submit,
42375 .urb_submit_error = mon_submit_error,
42376 .urb_complete = mon_complete,
42377diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42378index d6bea3e..60b250e 100644
42379--- a/drivers/usb/wusbcore/wa-hc.h
42380+++ b/drivers/usb/wusbcore/wa-hc.h
42381@@ -192,7 +192,7 @@ struct wahc {
42382 struct list_head xfer_delayed_list;
42383 spinlock_t xfer_list_lock;
42384 struct work_struct xfer_work;
42385- atomic_t xfer_id_count;
42386+ atomic_unchecked_t xfer_id_count;
42387 };
42388
42389
42390@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42391 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42392 spin_lock_init(&wa->xfer_list_lock);
42393 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42394- atomic_set(&wa->xfer_id_count, 1);
42395+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42396 }
42397
42398 /**
42399diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42400index 613a5fc..3174865 100644
42401--- a/drivers/usb/wusbcore/wa-xfer.c
42402+++ b/drivers/usb/wusbcore/wa-xfer.c
42403@@ -293,7 +293,7 @@ out:
42404 */
42405 static void wa_xfer_id_init(struct wa_xfer *xfer)
42406 {
42407- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42408+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42409 }
42410
42411 /*
42412diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42413index aa42fce..f8a828c 100644
42414--- a/drivers/uwb/wlp/messages.c
42415+++ b/drivers/uwb/wlp/messages.c
42416@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42417 size_t len = skb->len;
42418 size_t used;
42419 ssize_t result;
42420- struct wlp_nonce enonce, rnonce;
42421+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42422 enum wlp_assc_error assc_err;
42423 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42424 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42425diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42426index 0370399..6627c94 100644
42427--- a/drivers/uwb/wlp/sysfs.c
42428+++ b/drivers/uwb/wlp/sysfs.c
42429@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42430 return ret;
42431 }
42432
42433-static
42434-struct sysfs_ops wss_sysfs_ops = {
42435+static const struct sysfs_ops wss_sysfs_ops = {
42436 .show = wlp_wss_attr_show,
42437 .store = wlp_wss_attr_store,
42438 };
42439diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42440index d5e8010..5687b56 100644
42441--- a/drivers/video/atmel_lcdfb.c
42442+++ b/drivers/video/atmel_lcdfb.c
42443@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42444 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42445 }
42446
42447-static struct backlight_ops atmel_lcdc_bl_ops = {
42448+static const struct backlight_ops atmel_lcdc_bl_ops = {
42449 .update_status = atmel_bl_update_status,
42450 .get_brightness = atmel_bl_get_brightness,
42451 };
42452diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42453index e4e4d43..66bcbcc 100644
42454--- a/drivers/video/aty/aty128fb.c
42455+++ b/drivers/video/aty/aty128fb.c
42456@@ -149,7 +149,7 @@ enum {
42457 };
42458
42459 /* Must match above enum */
42460-static const char *r128_family[] __devinitdata = {
42461+static const char *r128_family[] __devinitconst = {
42462 "AGP",
42463 "PCI",
42464 "PRO AGP",
42465@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42466 return bd->props.brightness;
42467 }
42468
42469-static struct backlight_ops aty128_bl_data = {
42470+static const struct backlight_ops aty128_bl_data = {
42471 .get_brightness = aty128_bl_get_brightness,
42472 .update_status = aty128_bl_update_status,
42473 };
42474diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42475index 913b4a4..9295a38 100644
42476--- a/drivers/video/aty/atyfb_base.c
42477+++ b/drivers/video/aty/atyfb_base.c
42478@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42479 return bd->props.brightness;
42480 }
42481
42482-static struct backlight_ops aty_bl_data = {
42483+static const struct backlight_ops aty_bl_data = {
42484 .get_brightness = aty_bl_get_brightness,
42485 .update_status = aty_bl_update_status,
42486 };
42487diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42488index 1a056ad..221bd6a 100644
42489--- a/drivers/video/aty/radeon_backlight.c
42490+++ b/drivers/video/aty/radeon_backlight.c
42491@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42492 return bd->props.brightness;
42493 }
42494
42495-static struct backlight_ops radeon_bl_data = {
42496+static const struct backlight_ops radeon_bl_data = {
42497 .get_brightness = radeon_bl_get_brightness,
42498 .update_status = radeon_bl_update_status,
42499 };
42500diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42501index ad05da5..3cb2cb9 100644
42502--- a/drivers/video/backlight/adp5520_bl.c
42503+++ b/drivers/video/backlight/adp5520_bl.c
42504@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42505 return error ? data->current_brightness : reg_val;
42506 }
42507
42508-static struct backlight_ops adp5520_bl_ops = {
42509+static const struct backlight_ops adp5520_bl_ops = {
42510 .update_status = adp5520_bl_update_status,
42511 .get_brightness = adp5520_bl_get_brightness,
42512 };
42513diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42514index 2c3bdfc..d769b0b 100644
42515--- a/drivers/video/backlight/adx_bl.c
42516+++ b/drivers/video/backlight/adx_bl.c
42517@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42518 return 1;
42519 }
42520
42521-static struct backlight_ops adx_backlight_ops = {
42522+static const struct backlight_ops adx_backlight_ops = {
42523 .options = 0,
42524 .update_status = adx_backlight_update_status,
42525 .get_brightness = adx_backlight_get_brightness,
42526diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42527index 505c082..6b6b3cc 100644
42528--- a/drivers/video/backlight/atmel-pwm-bl.c
42529+++ b/drivers/video/backlight/atmel-pwm-bl.c
42530@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42531 return pwm_channel_enable(&pwmbl->pwmc);
42532 }
42533
42534-static struct backlight_ops atmel_pwm_bl_ops = {
42535+static const struct backlight_ops atmel_pwm_bl_ops = {
42536 .get_brightness = atmel_pwm_bl_get_intensity,
42537 .update_status = atmel_pwm_bl_set_intensity,
42538 };
42539diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42540index 5e20e6e..89025e6 100644
42541--- a/drivers/video/backlight/backlight.c
42542+++ b/drivers/video/backlight/backlight.c
42543@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42544 * ERR_PTR() or a pointer to the newly allocated device.
42545 */
42546 struct backlight_device *backlight_device_register(const char *name,
42547- struct device *parent, void *devdata, struct backlight_ops *ops)
42548+ struct device *parent, void *devdata, const struct backlight_ops *ops)
42549 {
42550 struct backlight_device *new_bd;
42551 int rc;
42552diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42553index 9677494..b4bcf80 100644
42554--- a/drivers/video/backlight/corgi_lcd.c
42555+++ b/drivers/video/backlight/corgi_lcd.c
42556@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42557 }
42558 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42559
42560-static struct backlight_ops corgi_bl_ops = {
42561+static const struct backlight_ops corgi_bl_ops = {
42562 .get_brightness = corgi_bl_get_intensity,
42563 .update_status = corgi_bl_update_status,
42564 };
42565diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42566index b9fe62b..2914bf1 100644
42567--- a/drivers/video/backlight/cr_bllcd.c
42568+++ b/drivers/video/backlight/cr_bllcd.c
42569@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42570 return intensity;
42571 }
42572
42573-static struct backlight_ops cr_backlight_ops = {
42574+static const struct backlight_ops cr_backlight_ops = {
42575 .get_brightness = cr_backlight_get_intensity,
42576 .update_status = cr_backlight_set_intensity,
42577 };
42578diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42579index 701a108..feacfd5 100644
42580--- a/drivers/video/backlight/da903x_bl.c
42581+++ b/drivers/video/backlight/da903x_bl.c
42582@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42583 return data->current_brightness;
42584 }
42585
42586-static struct backlight_ops da903x_backlight_ops = {
42587+static const struct backlight_ops da903x_backlight_ops = {
42588 .update_status = da903x_backlight_update_status,
42589 .get_brightness = da903x_backlight_get_brightness,
42590 };
42591diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42592index 6d27f62..e6d348e 100644
42593--- a/drivers/video/backlight/generic_bl.c
42594+++ b/drivers/video/backlight/generic_bl.c
42595@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42596 }
42597 EXPORT_SYMBOL(corgibl_limit_intensity);
42598
42599-static struct backlight_ops genericbl_ops = {
42600+static const struct backlight_ops genericbl_ops = {
42601 .options = BL_CORE_SUSPENDRESUME,
42602 .get_brightness = genericbl_get_intensity,
42603 .update_status = genericbl_send_intensity,
42604diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42605index 7fb4eef..f7cc528 100644
42606--- a/drivers/video/backlight/hp680_bl.c
42607+++ b/drivers/video/backlight/hp680_bl.c
42608@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
42609 return current_intensity;
42610 }
42611
42612-static struct backlight_ops hp680bl_ops = {
42613+static const struct backlight_ops hp680bl_ops = {
42614 .get_brightness = hp680bl_get_intensity,
42615 .update_status = hp680bl_set_intensity,
42616 };
42617diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
42618index 7aed256..db9071f 100644
42619--- a/drivers/video/backlight/jornada720_bl.c
42620+++ b/drivers/video/backlight/jornada720_bl.c
42621@@ -93,7 +93,7 @@ out:
42622 return ret;
42623 }
42624
42625-static struct backlight_ops jornada_bl_ops = {
42626+static const struct backlight_ops jornada_bl_ops = {
42627 .get_brightness = jornada_bl_get_brightness,
42628 .update_status = jornada_bl_update_status,
42629 .options = BL_CORE_SUSPENDRESUME,
42630diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
42631index a38fda1..939e7b8 100644
42632--- a/drivers/video/backlight/kb3886_bl.c
42633+++ b/drivers/video/backlight/kb3886_bl.c
42634@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
42635 return kb3886bl_intensity;
42636 }
42637
42638-static struct backlight_ops kb3886bl_ops = {
42639+static const struct backlight_ops kb3886bl_ops = {
42640 .get_brightness = kb3886bl_get_intensity,
42641 .update_status = kb3886bl_send_intensity,
42642 };
42643diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
42644index 6b488b8..00a9591 100644
42645--- a/drivers/video/backlight/locomolcd.c
42646+++ b/drivers/video/backlight/locomolcd.c
42647@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
42648 return current_intensity;
42649 }
42650
42651-static struct backlight_ops locomobl_data = {
42652+static const struct backlight_ops locomobl_data = {
42653 .get_brightness = locomolcd_get_intensity,
42654 .update_status = locomolcd_set_intensity,
42655 };
42656diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
42657index 99bdfa8..3dac448 100644
42658--- a/drivers/video/backlight/mbp_nvidia_bl.c
42659+++ b/drivers/video/backlight/mbp_nvidia_bl.c
42660@@ -33,7 +33,7 @@ struct dmi_match_data {
42661 unsigned long iostart;
42662 unsigned long iolen;
42663 /* Backlight operations structure. */
42664- struct backlight_ops backlight_ops;
42665+ const struct backlight_ops backlight_ops;
42666 };
42667
42668 /* Module parameters. */
42669diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
42670index cbad67e..3cf900e 100644
42671--- a/drivers/video/backlight/omap1_bl.c
42672+++ b/drivers/video/backlight/omap1_bl.c
42673@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
42674 return bl->current_intensity;
42675 }
42676
42677-static struct backlight_ops omapbl_ops = {
42678+static const struct backlight_ops omapbl_ops = {
42679 .get_brightness = omapbl_get_intensity,
42680 .update_status = omapbl_update_status,
42681 };
42682diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
42683index 9edaf24..075786e 100644
42684--- a/drivers/video/backlight/progear_bl.c
42685+++ b/drivers/video/backlight/progear_bl.c
42686@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
42687 return intensity - HW_LEVEL_MIN;
42688 }
42689
42690-static struct backlight_ops progearbl_ops = {
42691+static const struct backlight_ops progearbl_ops = {
42692 .get_brightness = progearbl_get_intensity,
42693 .update_status = progearbl_set_intensity,
42694 };
42695diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
42696index 8871662..df9e0b3 100644
42697--- a/drivers/video/backlight/pwm_bl.c
42698+++ b/drivers/video/backlight/pwm_bl.c
42699@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
42700 return bl->props.brightness;
42701 }
42702
42703-static struct backlight_ops pwm_backlight_ops = {
42704+static const struct backlight_ops pwm_backlight_ops = {
42705 .update_status = pwm_backlight_update_status,
42706 .get_brightness = pwm_backlight_get_brightness,
42707 };
42708diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
42709index 43edbad..e14ce4d 100644
42710--- a/drivers/video/backlight/tosa_bl.c
42711+++ b/drivers/video/backlight/tosa_bl.c
42712@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
42713 return props->brightness;
42714 }
42715
42716-static struct backlight_ops bl_ops = {
42717+static const struct backlight_ops bl_ops = {
42718 .get_brightness = tosa_bl_get_brightness,
42719 .update_status = tosa_bl_update_status,
42720 };
42721diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
42722index 467bdb7..e32add3 100644
42723--- a/drivers/video/backlight/wm831x_bl.c
42724+++ b/drivers/video/backlight/wm831x_bl.c
42725@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
42726 return data->current_brightness;
42727 }
42728
42729-static struct backlight_ops wm831x_backlight_ops = {
42730+static const struct backlight_ops wm831x_backlight_ops = {
42731 .options = BL_CORE_SUSPENDRESUME,
42732 .update_status = wm831x_backlight_update_status,
42733 .get_brightness = wm831x_backlight_get_brightness,
42734diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
42735index e49ae5e..db4e6f7 100644
42736--- a/drivers/video/bf54x-lq043fb.c
42737+++ b/drivers/video/bf54x-lq043fb.c
42738@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42739 return 0;
42740 }
42741
42742-static struct backlight_ops bfin_lq043fb_bl_ops = {
42743+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42744 .get_brightness = bl_get_brightness,
42745 };
42746
42747diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
42748index 2c72a7c..d523e52 100644
42749--- a/drivers/video/bfin-t350mcqb-fb.c
42750+++ b/drivers/video/bfin-t350mcqb-fb.c
42751@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42752 return 0;
42753 }
42754
42755-static struct backlight_ops bfin_lq043fb_bl_ops = {
42756+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42757 .get_brightness = bl_get_brightness,
42758 };
42759
42760diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
42761index f53b9f1..958bf4e 100644
42762--- a/drivers/video/fbcmap.c
42763+++ b/drivers/video/fbcmap.c
42764@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
42765 rc = -ENODEV;
42766 goto out;
42767 }
42768- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
42769- !info->fbops->fb_setcmap)) {
42770+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
42771 rc = -EINVAL;
42772 goto out1;
42773 }
42774diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
42775index 99bbd28..ad3829e 100644
42776--- a/drivers/video/fbmem.c
42777+++ b/drivers/video/fbmem.c
42778@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42779 image->dx += image->width + 8;
42780 }
42781 } else if (rotate == FB_ROTATE_UD) {
42782- for (x = 0; x < num && image->dx >= 0; x++) {
42783+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
42784 info->fbops->fb_imageblit(info, image);
42785 image->dx -= image->width + 8;
42786 }
42787@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42788 image->dy += image->height + 8;
42789 }
42790 } else if (rotate == FB_ROTATE_CCW) {
42791- for (x = 0; x < num && image->dy >= 0; x++) {
42792+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
42793 info->fbops->fb_imageblit(info, image);
42794 image->dy -= image->height + 8;
42795 }
42796@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
42797 int flags = info->flags;
42798 int ret = 0;
42799
42800+ pax_track_stack();
42801+
42802 if (var->activate & FB_ACTIVATE_INV_MODE) {
42803 struct fb_videomode mode1, mode2;
42804
42805@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42806 void __user *argp = (void __user *)arg;
42807 long ret = 0;
42808
42809+ pax_track_stack();
42810+
42811 switch (cmd) {
42812 case FBIOGET_VSCREENINFO:
42813 if (!lock_fb_info(info))
42814@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42815 return -EFAULT;
42816 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
42817 return -EINVAL;
42818- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
42819+ if (con2fb.framebuffer >= FB_MAX)
42820 return -EINVAL;
42821 if (!registered_fb[con2fb.framebuffer])
42822 request_module("fb%d", con2fb.framebuffer);
42823diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
42824index f20eff8..3e4f622 100644
42825--- a/drivers/video/geode/gx1fb_core.c
42826+++ b/drivers/video/geode/gx1fb_core.c
42827@@ -30,7 +30,7 @@ static int crt_option = 1;
42828 static char panel_option[32] = "";
42829
42830 /* Modes relevant to the GX1 (taken from modedb.c) */
42831-static const struct fb_videomode __initdata gx1_modedb[] = {
42832+static const struct fb_videomode __initconst gx1_modedb[] = {
42833 /* 640x480-60 VESA */
42834 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
42835 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
42836diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
42837index 896e53d..4d87d0b 100644
42838--- a/drivers/video/gxt4500.c
42839+++ b/drivers/video/gxt4500.c
42840@@ -156,7 +156,7 @@ struct gxt4500_par {
42841 static char *mode_option;
42842
42843 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
42844-static const struct fb_videomode defaultmode __devinitdata = {
42845+static const struct fb_videomode defaultmode __devinitconst = {
42846 .refresh = 60,
42847 .xres = 1280,
42848 .yres = 1024,
42849@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
42850 return 0;
42851 }
42852
42853-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
42854+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
42855 .id = "IBM GXT4500P",
42856 .type = FB_TYPE_PACKED_PIXELS,
42857 .visual = FB_VISUAL_PSEUDOCOLOR,
42858diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
42859index f5bedee..28c6028 100644
42860--- a/drivers/video/i810/i810_accel.c
42861+++ b/drivers/video/i810/i810_accel.c
42862@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
42863 }
42864 }
42865 printk("ringbuffer lockup!!!\n");
42866+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
42867 i810_report_error(mmio);
42868 par->dev_flags |= LOCKUP;
42869 info->pixmap.scan_align = 1;
42870diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
42871index 5743ea2..457f82c 100644
42872--- a/drivers/video/i810/i810_main.c
42873+++ b/drivers/video/i810/i810_main.c
42874@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
42875 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
42876
42877 /* PCI */
42878-static const char *i810_pci_list[] __devinitdata = {
42879+static const char *i810_pci_list[] __devinitconst = {
42880 "Intel(R) 810 Framebuffer Device" ,
42881 "Intel(R) 810-DC100 Framebuffer Device" ,
42882 "Intel(R) 810E Framebuffer Device" ,
42883diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
42884index 3c14e43..eafa544 100644
42885--- a/drivers/video/logo/logo_linux_clut224.ppm
42886+++ b/drivers/video/logo/logo_linux_clut224.ppm
42887@@ -1,1604 +1,1123 @@
42888 P3
42889-# Standard 224-color Linux logo
42890 80 80
42891 255
42892- 0 0 0 0 0 0 0 0 0 0 0 0
42893- 0 0 0 0 0 0 0 0 0 0 0 0
42894- 0 0 0 0 0 0 0 0 0 0 0 0
42895- 0 0 0 0 0 0 0 0 0 0 0 0
42896- 0 0 0 0 0 0 0 0 0 0 0 0
42897- 0 0 0 0 0 0 0 0 0 0 0 0
42898- 0 0 0 0 0 0 0 0 0 0 0 0
42899- 0 0 0 0 0 0 0 0 0 0 0 0
42900- 0 0 0 0 0 0 0 0 0 0 0 0
42901- 6 6 6 6 6 6 10 10 10 10 10 10
42902- 10 10 10 6 6 6 6 6 6 6 6 6
42903- 0 0 0 0 0 0 0 0 0 0 0 0
42904- 0 0 0 0 0 0 0 0 0 0 0 0
42905- 0 0 0 0 0 0 0 0 0 0 0 0
42906- 0 0 0 0 0 0 0 0 0 0 0 0
42907- 0 0 0 0 0 0 0 0 0 0 0 0
42908- 0 0 0 0 0 0 0 0 0 0 0 0
42909- 0 0 0 0 0 0 0 0 0 0 0 0
42910- 0 0 0 0 0 0 0 0 0 0 0 0
42911- 0 0 0 0 0 0 0 0 0 0 0 0
42912- 0 0 0 0 0 0 0 0 0 0 0 0
42913- 0 0 0 0 0 0 0 0 0 0 0 0
42914- 0 0 0 0 0 0 0 0 0 0 0 0
42915- 0 0 0 0 0 0 0 0 0 0 0 0
42916- 0 0 0 0 0 0 0 0 0 0 0 0
42917- 0 0 0 0 0 0 0 0 0 0 0 0
42918- 0 0 0 0 0 0 0 0 0 0 0 0
42919- 0 0 0 0 0 0 0 0 0 0 0 0
42920- 0 0 0 6 6 6 10 10 10 14 14 14
42921- 22 22 22 26 26 26 30 30 30 34 34 34
42922- 30 30 30 30 30 30 26 26 26 18 18 18
42923- 14 14 14 10 10 10 6 6 6 0 0 0
42924- 0 0 0 0 0 0 0 0 0 0 0 0
42925- 0 0 0 0 0 0 0 0 0 0 0 0
42926- 0 0 0 0 0 0 0 0 0 0 0 0
42927- 0 0 0 0 0 0 0 0 0 0 0 0
42928- 0 0 0 0 0 0 0 0 0 0 0 0
42929- 0 0 0 0 0 0 0 0 0 0 0 0
42930- 0 0 0 0 0 0 0 0 0 0 0 0
42931- 0 0 0 0 0 0 0 0 0 0 0 0
42932- 0 0 0 0 0 0 0 0 0 0 0 0
42933- 0 0 0 0 0 1 0 0 1 0 0 0
42934- 0 0 0 0 0 0 0 0 0 0 0 0
42935- 0 0 0 0 0 0 0 0 0 0 0 0
42936- 0 0 0 0 0 0 0 0 0 0 0 0
42937- 0 0 0 0 0 0 0 0 0 0 0 0
42938- 0 0 0 0 0 0 0 0 0 0 0 0
42939- 0 0 0 0 0 0 0 0 0 0 0 0
42940- 6 6 6 14 14 14 26 26 26 42 42 42
42941- 54 54 54 66 66 66 78 78 78 78 78 78
42942- 78 78 78 74 74 74 66 66 66 54 54 54
42943- 42 42 42 26 26 26 18 18 18 10 10 10
42944- 6 6 6 0 0 0 0 0 0 0 0 0
42945- 0 0 0 0 0 0 0 0 0 0 0 0
42946- 0 0 0 0 0 0 0 0 0 0 0 0
42947- 0 0 0 0 0 0 0 0 0 0 0 0
42948- 0 0 0 0 0 0 0 0 0 0 0 0
42949- 0 0 0 0 0 0 0 0 0 0 0 0
42950- 0 0 0 0 0 0 0 0 0 0 0 0
42951- 0 0 0 0 0 0 0 0 0 0 0 0
42952- 0 0 0 0 0 0 0 0 0 0 0 0
42953- 0 0 1 0 0 0 0 0 0 0 0 0
42954- 0 0 0 0 0 0 0 0 0 0 0 0
42955- 0 0 0 0 0 0 0 0 0 0 0 0
42956- 0 0 0 0 0 0 0 0 0 0 0 0
42957- 0 0 0 0 0 0 0 0 0 0 0 0
42958- 0 0 0 0 0 0 0 0 0 0 0 0
42959- 0 0 0 0 0 0 0 0 0 10 10 10
42960- 22 22 22 42 42 42 66 66 66 86 86 86
42961- 66 66 66 38 38 38 38 38 38 22 22 22
42962- 26 26 26 34 34 34 54 54 54 66 66 66
42963- 86 86 86 70 70 70 46 46 46 26 26 26
42964- 14 14 14 6 6 6 0 0 0 0 0 0
42965- 0 0 0 0 0 0 0 0 0 0 0 0
42966- 0 0 0 0 0 0 0 0 0 0 0 0
42967- 0 0 0 0 0 0 0 0 0 0 0 0
42968- 0 0 0 0 0 0 0 0 0 0 0 0
42969- 0 0 0 0 0 0 0 0 0 0 0 0
42970- 0 0 0 0 0 0 0 0 0 0 0 0
42971- 0 0 0 0 0 0 0 0 0 0 0 0
42972- 0 0 0 0 0 0 0 0 0 0 0 0
42973- 0 0 1 0 0 1 0 0 1 0 0 0
42974- 0 0 0 0 0 0 0 0 0 0 0 0
42975- 0 0 0 0 0 0 0 0 0 0 0 0
42976- 0 0 0 0 0 0 0 0 0 0 0 0
42977- 0 0 0 0 0 0 0 0 0 0 0 0
42978- 0 0 0 0 0 0 0 0 0 0 0 0
42979- 0 0 0 0 0 0 10 10 10 26 26 26
42980- 50 50 50 82 82 82 58 58 58 6 6 6
42981- 2 2 6 2 2 6 2 2 6 2 2 6
42982- 2 2 6 2 2 6 2 2 6 2 2 6
42983- 6 6 6 54 54 54 86 86 86 66 66 66
42984- 38 38 38 18 18 18 6 6 6 0 0 0
42985- 0 0 0 0 0 0 0 0 0 0 0 0
42986- 0 0 0 0 0 0 0 0 0 0 0 0
42987- 0 0 0 0 0 0 0 0 0 0 0 0
42988- 0 0 0 0 0 0 0 0 0 0 0 0
42989- 0 0 0 0 0 0 0 0 0 0 0 0
42990- 0 0 0 0 0 0 0 0 0 0 0 0
42991- 0 0 0 0 0 0 0 0 0 0 0 0
42992- 0 0 0 0 0 0 0 0 0 0 0 0
42993- 0 0 0 0 0 0 0 0 0 0 0 0
42994- 0 0 0 0 0 0 0 0 0 0 0 0
42995- 0 0 0 0 0 0 0 0 0 0 0 0
42996- 0 0 0 0 0 0 0 0 0 0 0 0
42997- 0 0 0 0 0 0 0 0 0 0 0 0
42998- 0 0 0 0 0 0 0 0 0 0 0 0
42999- 0 0 0 6 6 6 22 22 22 50 50 50
43000- 78 78 78 34 34 34 2 2 6 2 2 6
43001- 2 2 6 2 2 6 2 2 6 2 2 6
43002- 2 2 6 2 2 6 2 2 6 2 2 6
43003- 2 2 6 2 2 6 6 6 6 70 70 70
43004- 78 78 78 46 46 46 22 22 22 6 6 6
43005- 0 0 0 0 0 0 0 0 0 0 0 0
43006- 0 0 0 0 0 0 0 0 0 0 0 0
43007- 0 0 0 0 0 0 0 0 0 0 0 0
43008- 0 0 0 0 0 0 0 0 0 0 0 0
43009- 0 0 0 0 0 0 0 0 0 0 0 0
43010- 0 0 0 0 0 0 0 0 0 0 0 0
43011- 0 0 0 0 0 0 0 0 0 0 0 0
43012- 0 0 0 0 0 0 0 0 0 0 0 0
43013- 0 0 1 0 0 1 0 0 1 0 0 0
43014- 0 0 0 0 0 0 0 0 0 0 0 0
43015- 0 0 0 0 0 0 0 0 0 0 0 0
43016- 0 0 0 0 0 0 0 0 0 0 0 0
43017- 0 0 0 0 0 0 0 0 0 0 0 0
43018- 0 0 0 0 0 0 0 0 0 0 0 0
43019- 6 6 6 18 18 18 42 42 42 82 82 82
43020- 26 26 26 2 2 6 2 2 6 2 2 6
43021- 2 2 6 2 2 6 2 2 6 2 2 6
43022- 2 2 6 2 2 6 2 2 6 14 14 14
43023- 46 46 46 34 34 34 6 6 6 2 2 6
43024- 42 42 42 78 78 78 42 42 42 18 18 18
43025- 6 6 6 0 0 0 0 0 0 0 0 0
43026- 0 0 0 0 0 0 0 0 0 0 0 0
43027- 0 0 0 0 0 0 0 0 0 0 0 0
43028- 0 0 0 0 0 0 0 0 0 0 0 0
43029- 0 0 0 0 0 0 0 0 0 0 0 0
43030- 0 0 0 0 0 0 0 0 0 0 0 0
43031- 0 0 0 0 0 0 0 0 0 0 0 0
43032- 0 0 0 0 0 0 0 0 0 0 0 0
43033- 0 0 1 0 0 0 0 0 1 0 0 0
43034- 0 0 0 0 0 0 0 0 0 0 0 0
43035- 0 0 0 0 0 0 0 0 0 0 0 0
43036- 0 0 0 0 0 0 0 0 0 0 0 0
43037- 0 0 0 0 0 0 0 0 0 0 0 0
43038- 0 0 0 0 0 0 0 0 0 0 0 0
43039- 10 10 10 30 30 30 66 66 66 58 58 58
43040- 2 2 6 2 2 6 2 2 6 2 2 6
43041- 2 2 6 2 2 6 2 2 6 2 2 6
43042- 2 2 6 2 2 6 2 2 6 26 26 26
43043- 86 86 86 101 101 101 46 46 46 10 10 10
43044- 2 2 6 58 58 58 70 70 70 34 34 34
43045- 10 10 10 0 0 0 0 0 0 0 0 0
43046- 0 0 0 0 0 0 0 0 0 0 0 0
43047- 0 0 0 0 0 0 0 0 0 0 0 0
43048- 0 0 0 0 0 0 0 0 0 0 0 0
43049- 0 0 0 0 0 0 0 0 0 0 0 0
43050- 0 0 0 0 0 0 0 0 0 0 0 0
43051- 0 0 0 0 0 0 0 0 0 0 0 0
43052- 0 0 0 0 0 0 0 0 0 0 0 0
43053- 0 0 1 0 0 1 0 0 1 0 0 0
43054- 0 0 0 0 0 0 0 0 0 0 0 0
43055- 0 0 0 0 0 0 0 0 0 0 0 0
43056- 0 0 0 0 0 0 0 0 0 0 0 0
43057- 0 0 0 0 0 0 0 0 0 0 0 0
43058- 0 0 0 0 0 0 0 0 0 0 0 0
43059- 14 14 14 42 42 42 86 86 86 10 10 10
43060- 2 2 6 2 2 6 2 2 6 2 2 6
43061- 2 2 6 2 2 6 2 2 6 2 2 6
43062- 2 2 6 2 2 6 2 2 6 30 30 30
43063- 94 94 94 94 94 94 58 58 58 26 26 26
43064- 2 2 6 6 6 6 78 78 78 54 54 54
43065- 22 22 22 6 6 6 0 0 0 0 0 0
43066- 0 0 0 0 0 0 0 0 0 0 0 0
43067- 0 0 0 0 0 0 0 0 0 0 0 0
43068- 0 0 0 0 0 0 0 0 0 0 0 0
43069- 0 0 0 0 0 0 0 0 0 0 0 0
43070- 0 0 0 0 0 0 0 0 0 0 0 0
43071- 0 0 0 0 0 0 0 0 0 0 0 0
43072- 0 0 0 0 0 0 0 0 0 0 0 0
43073- 0 0 0 0 0 0 0 0 0 0 0 0
43074- 0 0 0 0 0 0 0 0 0 0 0 0
43075- 0 0 0 0 0 0 0 0 0 0 0 0
43076- 0 0 0 0 0 0 0 0 0 0 0 0
43077- 0 0 0 0 0 0 0 0 0 0 0 0
43078- 0 0 0 0 0 0 0 0 0 6 6 6
43079- 22 22 22 62 62 62 62 62 62 2 2 6
43080- 2 2 6 2 2 6 2 2 6 2 2 6
43081- 2 2 6 2 2 6 2 2 6 2 2 6
43082- 2 2 6 2 2 6 2 2 6 26 26 26
43083- 54 54 54 38 38 38 18 18 18 10 10 10
43084- 2 2 6 2 2 6 34 34 34 82 82 82
43085- 38 38 38 14 14 14 0 0 0 0 0 0
43086- 0 0 0 0 0 0 0 0 0 0 0 0
43087- 0 0 0 0 0 0 0 0 0 0 0 0
43088- 0 0 0 0 0 0 0 0 0 0 0 0
43089- 0 0 0 0 0 0 0 0 0 0 0 0
43090- 0 0 0 0 0 0 0 0 0 0 0 0
43091- 0 0 0 0 0 0 0 0 0 0 0 0
43092- 0 0 0 0 0 0 0 0 0 0 0 0
43093- 0 0 0 0 0 1 0 0 1 0 0 0
43094- 0 0 0 0 0 0 0 0 0 0 0 0
43095- 0 0 0 0 0 0 0 0 0 0 0 0
43096- 0 0 0 0 0 0 0 0 0 0 0 0
43097- 0 0 0 0 0 0 0 0 0 0 0 0
43098- 0 0 0 0 0 0 0 0 0 6 6 6
43099- 30 30 30 78 78 78 30 30 30 2 2 6
43100- 2 2 6 2 2 6 2 2 6 2 2 6
43101- 2 2 6 2 2 6 2 2 6 2 2 6
43102- 2 2 6 2 2 6 2 2 6 10 10 10
43103- 10 10 10 2 2 6 2 2 6 2 2 6
43104- 2 2 6 2 2 6 2 2 6 78 78 78
43105- 50 50 50 18 18 18 6 6 6 0 0 0
43106- 0 0 0 0 0 0 0 0 0 0 0 0
43107- 0 0 0 0 0 0 0 0 0 0 0 0
43108- 0 0 0 0 0 0 0 0 0 0 0 0
43109- 0 0 0 0 0 0 0 0 0 0 0 0
43110- 0 0 0 0 0 0 0 0 0 0 0 0
43111- 0 0 0 0 0 0 0 0 0 0 0 0
43112- 0 0 0 0 0 0 0 0 0 0 0 0
43113- 0 0 1 0 0 0 0 0 0 0 0 0
43114- 0 0 0 0 0 0 0 0 0 0 0 0
43115- 0 0 0 0 0 0 0 0 0 0 0 0
43116- 0 0 0 0 0 0 0 0 0 0 0 0
43117- 0 0 0 0 0 0 0 0 0 0 0 0
43118- 0 0 0 0 0 0 0 0 0 10 10 10
43119- 38 38 38 86 86 86 14 14 14 2 2 6
43120- 2 2 6 2 2 6 2 2 6 2 2 6
43121- 2 2 6 2 2 6 2 2 6 2 2 6
43122- 2 2 6 2 2 6 2 2 6 2 2 6
43123- 2 2 6 2 2 6 2 2 6 2 2 6
43124- 2 2 6 2 2 6 2 2 6 54 54 54
43125- 66 66 66 26 26 26 6 6 6 0 0 0
43126- 0 0 0 0 0 0 0 0 0 0 0 0
43127- 0 0 0 0 0 0 0 0 0 0 0 0
43128- 0 0 0 0 0 0 0 0 0 0 0 0
43129- 0 0 0 0 0 0 0 0 0 0 0 0
43130- 0 0 0 0 0 0 0 0 0 0 0 0
43131- 0 0 0 0 0 0 0 0 0 0 0 0
43132- 0 0 0 0 0 0 0 0 0 0 0 0
43133- 0 0 0 0 0 1 0 0 1 0 0 0
43134- 0 0 0 0 0 0 0 0 0 0 0 0
43135- 0 0 0 0 0 0 0 0 0 0 0 0
43136- 0 0 0 0 0 0 0 0 0 0 0 0
43137- 0 0 0 0 0 0 0 0 0 0 0 0
43138- 0 0 0 0 0 0 0 0 0 14 14 14
43139- 42 42 42 82 82 82 2 2 6 2 2 6
43140- 2 2 6 6 6 6 10 10 10 2 2 6
43141- 2 2 6 2 2 6 2 2 6 2 2 6
43142- 2 2 6 2 2 6 2 2 6 6 6 6
43143- 14 14 14 10 10 10 2 2 6 2 2 6
43144- 2 2 6 2 2 6 2 2 6 18 18 18
43145- 82 82 82 34 34 34 10 10 10 0 0 0
43146- 0 0 0 0 0 0 0 0 0 0 0 0
43147- 0 0 0 0 0 0 0 0 0 0 0 0
43148- 0 0 0 0 0 0 0 0 0 0 0 0
43149- 0 0 0 0 0 0 0 0 0 0 0 0
43150- 0 0 0 0 0 0 0 0 0 0 0 0
43151- 0 0 0 0 0 0 0 0 0 0 0 0
43152- 0 0 0 0 0 0 0 0 0 0 0 0
43153- 0 0 1 0 0 0 0 0 0 0 0 0
43154- 0 0 0 0 0 0 0 0 0 0 0 0
43155- 0 0 0 0 0 0 0 0 0 0 0 0
43156- 0 0 0 0 0 0 0 0 0 0 0 0
43157- 0 0 0 0 0 0 0 0 0 0 0 0
43158- 0 0 0 0 0 0 0 0 0 14 14 14
43159- 46 46 46 86 86 86 2 2 6 2 2 6
43160- 6 6 6 6 6 6 22 22 22 34 34 34
43161- 6 6 6 2 2 6 2 2 6 2 2 6
43162- 2 2 6 2 2 6 18 18 18 34 34 34
43163- 10 10 10 50 50 50 22 22 22 2 2 6
43164- 2 2 6 2 2 6 2 2 6 10 10 10
43165- 86 86 86 42 42 42 14 14 14 0 0 0
43166- 0 0 0 0 0 0 0 0 0 0 0 0
43167- 0 0 0 0 0 0 0 0 0 0 0 0
43168- 0 0 0 0 0 0 0 0 0 0 0 0
43169- 0 0 0 0 0 0 0 0 0 0 0 0
43170- 0 0 0 0 0 0 0 0 0 0 0 0
43171- 0 0 0 0 0 0 0 0 0 0 0 0
43172- 0 0 0 0 0 0 0 0 0 0 0 0
43173- 0 0 1 0 0 1 0 0 1 0 0 0
43174- 0 0 0 0 0 0 0 0 0 0 0 0
43175- 0 0 0 0 0 0 0 0 0 0 0 0
43176- 0 0 0 0 0 0 0 0 0 0 0 0
43177- 0 0 0 0 0 0 0 0 0 0 0 0
43178- 0 0 0 0 0 0 0 0 0 14 14 14
43179- 46 46 46 86 86 86 2 2 6 2 2 6
43180- 38 38 38 116 116 116 94 94 94 22 22 22
43181- 22 22 22 2 2 6 2 2 6 2 2 6
43182- 14 14 14 86 86 86 138 138 138 162 162 162
43183-154 154 154 38 38 38 26 26 26 6 6 6
43184- 2 2 6 2 2 6 2 2 6 2 2 6
43185- 86 86 86 46 46 46 14 14 14 0 0 0
43186- 0 0 0 0 0 0 0 0 0 0 0 0
43187- 0 0 0 0 0 0 0 0 0 0 0 0
43188- 0 0 0 0 0 0 0 0 0 0 0 0
43189- 0 0 0 0 0 0 0 0 0 0 0 0
43190- 0 0 0 0 0 0 0 0 0 0 0 0
43191- 0 0 0 0 0 0 0 0 0 0 0 0
43192- 0 0 0 0 0 0 0 0 0 0 0 0
43193- 0 0 0 0 0 0 0 0 0 0 0 0
43194- 0 0 0 0 0 0 0 0 0 0 0 0
43195- 0 0 0 0 0 0 0 0 0 0 0 0
43196- 0 0 0 0 0 0 0 0 0 0 0 0
43197- 0 0 0 0 0 0 0 0 0 0 0 0
43198- 0 0 0 0 0 0 0 0 0 14 14 14
43199- 46 46 46 86 86 86 2 2 6 14 14 14
43200-134 134 134 198 198 198 195 195 195 116 116 116
43201- 10 10 10 2 2 6 2 2 6 6 6 6
43202-101 98 89 187 187 187 210 210 210 218 218 218
43203-214 214 214 134 134 134 14 14 14 6 6 6
43204- 2 2 6 2 2 6 2 2 6 2 2 6
43205- 86 86 86 50 50 50 18 18 18 6 6 6
43206- 0 0 0 0 0 0 0 0 0 0 0 0
43207- 0 0 0 0 0 0 0 0 0 0 0 0
43208- 0 0 0 0 0 0 0 0 0 0 0 0
43209- 0 0 0 0 0 0 0 0 0 0 0 0
43210- 0 0 0 0 0 0 0 0 0 0 0 0
43211- 0 0 0 0 0 0 0 0 0 0 0 0
43212- 0 0 0 0 0 0 0 0 1 0 0 0
43213- 0 0 1 0 0 1 0 0 1 0 0 0
43214- 0 0 0 0 0 0 0 0 0 0 0 0
43215- 0 0 0 0 0 0 0 0 0 0 0 0
43216- 0 0 0 0 0 0 0 0 0 0 0 0
43217- 0 0 0 0 0 0 0 0 0 0 0 0
43218- 0 0 0 0 0 0 0 0 0 14 14 14
43219- 46 46 46 86 86 86 2 2 6 54 54 54
43220-218 218 218 195 195 195 226 226 226 246 246 246
43221- 58 58 58 2 2 6 2 2 6 30 30 30
43222-210 210 210 253 253 253 174 174 174 123 123 123
43223-221 221 221 234 234 234 74 74 74 2 2 6
43224- 2 2 6 2 2 6 2 2 6 2 2 6
43225- 70 70 70 58 58 58 22 22 22 6 6 6
43226- 0 0 0 0 0 0 0 0 0 0 0 0
43227- 0 0 0 0 0 0 0 0 0 0 0 0
43228- 0 0 0 0 0 0 0 0 0 0 0 0
43229- 0 0 0 0 0 0 0 0 0 0 0 0
43230- 0 0 0 0 0 0 0 0 0 0 0 0
43231- 0 0 0 0 0 0 0 0 0 0 0 0
43232- 0 0 0 0 0 0 0 0 0 0 0 0
43233- 0 0 0 0 0 0 0 0 0 0 0 0
43234- 0 0 0 0 0 0 0 0 0 0 0 0
43235- 0 0 0 0 0 0 0 0 0 0 0 0
43236- 0 0 0 0 0 0 0 0 0 0 0 0
43237- 0 0 0 0 0 0 0 0 0 0 0 0
43238- 0 0 0 0 0 0 0 0 0 14 14 14
43239- 46 46 46 82 82 82 2 2 6 106 106 106
43240-170 170 170 26 26 26 86 86 86 226 226 226
43241-123 123 123 10 10 10 14 14 14 46 46 46
43242-231 231 231 190 190 190 6 6 6 70 70 70
43243- 90 90 90 238 238 238 158 158 158 2 2 6
43244- 2 2 6 2 2 6 2 2 6 2 2 6
43245- 70 70 70 58 58 58 22 22 22 6 6 6
43246- 0 0 0 0 0 0 0 0 0 0 0 0
43247- 0 0 0 0 0 0 0 0 0 0 0 0
43248- 0 0 0 0 0 0 0 0 0 0 0 0
43249- 0 0 0 0 0 0 0 0 0 0 0 0
43250- 0 0 0 0 0 0 0 0 0 0 0 0
43251- 0 0 0 0 0 0 0 0 0 0 0 0
43252- 0 0 0 0 0 0 0 0 1 0 0 0
43253- 0 0 1 0 0 1 0 0 1 0 0 0
43254- 0 0 0 0 0 0 0 0 0 0 0 0
43255- 0 0 0 0 0 0 0 0 0 0 0 0
43256- 0 0 0 0 0 0 0 0 0 0 0 0
43257- 0 0 0 0 0 0 0 0 0 0 0 0
43258- 0 0 0 0 0 0 0 0 0 14 14 14
43259- 42 42 42 86 86 86 6 6 6 116 116 116
43260-106 106 106 6 6 6 70 70 70 149 149 149
43261-128 128 128 18 18 18 38 38 38 54 54 54
43262-221 221 221 106 106 106 2 2 6 14 14 14
43263- 46 46 46 190 190 190 198 198 198 2 2 6
43264- 2 2 6 2 2 6 2 2 6 2 2 6
43265- 74 74 74 62 62 62 22 22 22 6 6 6
43266- 0 0 0 0 0 0 0 0 0 0 0 0
43267- 0 0 0 0 0 0 0 0 0 0 0 0
43268- 0 0 0 0 0 0 0 0 0 0 0 0
43269- 0 0 0 0 0 0 0 0 0 0 0 0
43270- 0 0 0 0 0 0 0 0 0 0 0 0
43271- 0 0 0 0 0 0 0 0 0 0 0 0
43272- 0 0 0 0 0 0 0 0 1 0 0 0
43273- 0 0 1 0 0 0 0 0 1 0 0 0
43274- 0 0 0 0 0 0 0 0 0 0 0 0
43275- 0 0 0 0 0 0 0 0 0 0 0 0
43276- 0 0 0 0 0 0 0 0 0 0 0 0
43277- 0 0 0 0 0 0 0 0 0 0 0 0
43278- 0 0 0 0 0 0 0 0 0 14 14 14
43279- 42 42 42 94 94 94 14 14 14 101 101 101
43280-128 128 128 2 2 6 18 18 18 116 116 116
43281-118 98 46 121 92 8 121 92 8 98 78 10
43282-162 162 162 106 106 106 2 2 6 2 2 6
43283- 2 2 6 195 195 195 195 195 195 6 6 6
43284- 2 2 6 2 2 6 2 2 6 2 2 6
43285- 74 74 74 62 62 62 22 22 22 6 6 6
43286- 0 0 0 0 0 0 0 0 0 0 0 0
43287- 0 0 0 0 0 0 0 0 0 0 0 0
43288- 0 0 0 0 0 0 0 0 0 0 0 0
43289- 0 0 0 0 0 0 0 0 0 0 0 0
43290- 0 0 0 0 0 0 0 0 0 0 0 0
43291- 0 0 0 0 0 0 0 0 0 0 0 0
43292- 0 0 0 0 0 0 0 0 1 0 0 1
43293- 0 0 1 0 0 0 0 0 1 0 0 0
43294- 0 0 0 0 0 0 0 0 0 0 0 0
43295- 0 0 0 0 0 0 0 0 0 0 0 0
43296- 0 0 0 0 0 0 0 0 0 0 0 0
43297- 0 0 0 0 0 0 0 0 0 0 0 0
43298- 0 0 0 0 0 0 0 0 0 10 10 10
43299- 38 38 38 90 90 90 14 14 14 58 58 58
43300-210 210 210 26 26 26 54 38 6 154 114 10
43301-226 170 11 236 186 11 225 175 15 184 144 12
43302-215 174 15 175 146 61 37 26 9 2 2 6
43303- 70 70 70 246 246 246 138 138 138 2 2 6
43304- 2 2 6 2 2 6 2 2 6 2 2 6
43305- 70 70 70 66 66 66 26 26 26 6 6 6
43306- 0 0 0 0 0 0 0 0 0 0 0 0
43307- 0 0 0 0 0 0 0 0 0 0 0 0
43308- 0 0 0 0 0 0 0 0 0 0 0 0
43309- 0 0 0 0 0 0 0 0 0 0 0 0
43310- 0 0 0 0 0 0 0 0 0 0 0 0
43311- 0 0 0 0 0 0 0 0 0 0 0 0
43312- 0 0 0 0 0 0 0 0 0 0 0 0
43313- 0 0 0 0 0 0 0 0 0 0 0 0
43314- 0 0 0 0 0 0 0 0 0 0 0 0
43315- 0 0 0 0 0 0 0 0 0 0 0 0
43316- 0 0 0 0 0 0 0 0 0 0 0 0
43317- 0 0 0 0 0 0 0 0 0 0 0 0
43318- 0 0 0 0 0 0 0 0 0 10 10 10
43319- 38 38 38 86 86 86 14 14 14 10 10 10
43320-195 195 195 188 164 115 192 133 9 225 175 15
43321-239 182 13 234 190 10 232 195 16 232 200 30
43322-245 207 45 241 208 19 232 195 16 184 144 12
43323-218 194 134 211 206 186 42 42 42 2 2 6
43324- 2 2 6 2 2 6 2 2 6 2 2 6
43325- 50 50 50 74 74 74 30 30 30 6 6 6
43326- 0 0 0 0 0 0 0 0 0 0 0 0
43327- 0 0 0 0 0 0 0 0 0 0 0 0
43328- 0 0 0 0 0 0 0 0 0 0 0 0
43329- 0 0 0 0 0 0 0 0 0 0 0 0
43330- 0 0 0 0 0 0 0 0 0 0 0 0
43331- 0 0 0 0 0 0 0 0 0 0 0 0
43332- 0 0 0 0 0 0 0 0 0 0 0 0
43333- 0 0 0 0 0 0 0 0 0 0 0 0
43334- 0 0 0 0 0 0 0 0 0 0 0 0
43335- 0 0 0 0 0 0 0 0 0 0 0 0
43336- 0 0 0 0 0 0 0 0 0 0 0 0
43337- 0 0 0 0 0 0 0 0 0 0 0 0
43338- 0 0 0 0 0 0 0 0 0 10 10 10
43339- 34 34 34 86 86 86 14 14 14 2 2 6
43340-121 87 25 192 133 9 219 162 10 239 182 13
43341-236 186 11 232 195 16 241 208 19 244 214 54
43342-246 218 60 246 218 38 246 215 20 241 208 19
43343-241 208 19 226 184 13 121 87 25 2 2 6
43344- 2 2 6 2 2 6 2 2 6 2 2 6
43345- 50 50 50 82 82 82 34 34 34 10 10 10
43346- 0 0 0 0 0 0 0 0 0 0 0 0
43347- 0 0 0 0 0 0 0 0 0 0 0 0
43348- 0 0 0 0 0 0 0 0 0 0 0 0
43349- 0 0 0 0 0 0 0 0 0 0 0 0
43350- 0 0 0 0 0 0 0 0 0 0 0 0
43351- 0 0 0 0 0 0 0 0 0 0 0 0
43352- 0 0 0 0 0 0 0 0 0 0 0 0
43353- 0 0 0 0 0 0 0 0 0 0 0 0
43354- 0 0 0 0 0 0 0 0 0 0 0 0
43355- 0 0 0 0 0 0 0 0 0 0 0 0
43356- 0 0 0 0 0 0 0 0 0 0 0 0
43357- 0 0 0 0 0 0 0 0 0 0 0 0
43358- 0 0 0 0 0 0 0 0 0 10 10 10
43359- 34 34 34 82 82 82 30 30 30 61 42 6
43360-180 123 7 206 145 10 230 174 11 239 182 13
43361-234 190 10 238 202 15 241 208 19 246 218 74
43362-246 218 38 246 215 20 246 215 20 246 215 20
43363-226 184 13 215 174 15 184 144 12 6 6 6
43364- 2 2 6 2 2 6 2 2 6 2 2 6
43365- 26 26 26 94 94 94 42 42 42 14 14 14
43366- 0 0 0 0 0 0 0 0 0 0 0 0
43367- 0 0 0 0 0 0 0 0 0 0 0 0
43368- 0 0 0 0 0 0 0 0 0 0 0 0
43369- 0 0 0 0 0 0 0 0 0 0 0 0
43370- 0 0 0 0 0 0 0 0 0 0 0 0
43371- 0 0 0 0 0 0 0 0 0 0 0 0
43372- 0 0 0 0 0 0 0 0 0 0 0 0
43373- 0 0 0 0 0 0 0 0 0 0 0 0
43374- 0 0 0 0 0 0 0 0 0 0 0 0
43375- 0 0 0 0 0 0 0 0 0 0 0 0
43376- 0 0 0 0 0 0 0 0 0 0 0 0
43377- 0 0 0 0 0 0 0 0 0 0 0 0
43378- 0 0 0 0 0 0 0 0 0 10 10 10
43379- 30 30 30 78 78 78 50 50 50 104 69 6
43380-192 133 9 216 158 10 236 178 12 236 186 11
43381-232 195 16 241 208 19 244 214 54 245 215 43
43382-246 215 20 246 215 20 241 208 19 198 155 10
43383-200 144 11 216 158 10 156 118 10 2 2 6
43384- 2 2 6 2 2 6 2 2 6 2 2 6
43385- 6 6 6 90 90 90 54 54 54 18 18 18
43386- 6 6 6 0 0 0 0 0 0 0 0 0
43387- 0 0 0 0 0 0 0 0 0 0 0 0
43388- 0 0 0 0 0 0 0 0 0 0 0 0
43389- 0 0 0 0 0 0 0 0 0 0 0 0
43390- 0 0 0 0 0 0 0 0 0 0 0 0
43391- 0 0 0 0 0 0 0 0 0 0 0 0
43392- 0 0 0 0 0 0 0 0 0 0 0 0
43393- 0 0 0 0 0 0 0 0 0 0 0 0
43394- 0 0 0 0 0 0 0 0 0 0 0 0
43395- 0 0 0 0 0 0 0 0 0 0 0 0
43396- 0 0 0 0 0 0 0 0 0 0 0 0
43397- 0 0 0 0 0 0 0 0 0 0 0 0
43398- 0 0 0 0 0 0 0 0 0 10 10 10
43399- 30 30 30 78 78 78 46 46 46 22 22 22
43400-137 92 6 210 162 10 239 182 13 238 190 10
43401-238 202 15 241 208 19 246 215 20 246 215 20
43402-241 208 19 203 166 17 185 133 11 210 150 10
43403-216 158 10 210 150 10 102 78 10 2 2 6
43404- 6 6 6 54 54 54 14 14 14 2 2 6
43405- 2 2 6 62 62 62 74 74 74 30 30 30
43406- 10 10 10 0 0 0 0 0 0 0 0 0
43407- 0 0 0 0 0 0 0 0 0 0 0 0
43408- 0 0 0 0 0 0 0 0 0 0 0 0
43409- 0 0 0 0 0 0 0 0 0 0 0 0
43410- 0 0 0 0 0 0 0 0 0 0 0 0
43411- 0 0 0 0 0 0 0 0 0 0 0 0
43412- 0 0 0 0 0 0 0 0 0 0 0 0
43413- 0 0 0 0 0 0 0 0 0 0 0 0
43414- 0 0 0 0 0 0 0 0 0 0 0 0
43415- 0 0 0 0 0 0 0 0 0 0 0 0
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 10 10 10
43419- 34 34 34 78 78 78 50 50 50 6 6 6
43420- 94 70 30 139 102 15 190 146 13 226 184 13
43421-232 200 30 232 195 16 215 174 15 190 146 13
43422-168 122 10 192 133 9 210 150 10 213 154 11
43423-202 150 34 182 157 106 101 98 89 2 2 6
43424- 2 2 6 78 78 78 116 116 116 58 58 58
43425- 2 2 6 22 22 22 90 90 90 46 46 46
43426- 18 18 18 6 6 6 0 0 0 0 0 0
43427- 0 0 0 0 0 0 0 0 0 0 0 0
43428- 0 0 0 0 0 0 0 0 0 0 0 0
43429- 0 0 0 0 0 0 0 0 0 0 0 0
43430- 0 0 0 0 0 0 0 0 0 0 0 0
43431- 0 0 0 0 0 0 0 0 0 0 0 0
43432- 0 0 0 0 0 0 0 0 0 0 0 0
43433- 0 0 0 0 0 0 0 0 0 0 0 0
43434- 0 0 0 0 0 0 0 0 0 0 0 0
43435- 0 0 0 0 0 0 0 0 0 0 0 0
43436- 0 0 0 0 0 0 0 0 0 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 0 10 10 10
43439- 38 38 38 86 86 86 50 50 50 6 6 6
43440-128 128 128 174 154 114 156 107 11 168 122 10
43441-198 155 10 184 144 12 197 138 11 200 144 11
43442-206 145 10 206 145 10 197 138 11 188 164 115
43443-195 195 195 198 198 198 174 174 174 14 14 14
43444- 2 2 6 22 22 22 116 116 116 116 116 116
43445- 22 22 22 2 2 6 74 74 74 70 70 70
43446- 30 30 30 10 10 10 0 0 0 0 0 0
43447- 0 0 0 0 0 0 0 0 0 0 0 0
43448- 0 0 0 0 0 0 0 0 0 0 0 0
43449- 0 0 0 0 0 0 0 0 0 0 0 0
43450- 0 0 0 0 0 0 0 0 0 0 0 0
43451- 0 0 0 0 0 0 0 0 0 0 0 0
43452- 0 0 0 0 0 0 0 0 0 0 0 0
43453- 0 0 0 0 0 0 0 0 0 0 0 0
43454- 0 0 0 0 0 0 0 0 0 0 0 0
43455- 0 0 0 0 0 0 0 0 0 0 0 0
43456- 0 0 0 0 0 0 0 0 0 0 0 0
43457- 0 0 0 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 6 6 6 18 18 18
43459- 50 50 50 101 101 101 26 26 26 10 10 10
43460-138 138 138 190 190 190 174 154 114 156 107 11
43461-197 138 11 200 144 11 197 138 11 192 133 9
43462-180 123 7 190 142 34 190 178 144 187 187 187
43463-202 202 202 221 221 221 214 214 214 66 66 66
43464- 2 2 6 2 2 6 50 50 50 62 62 62
43465- 6 6 6 2 2 6 10 10 10 90 90 90
43466- 50 50 50 18 18 18 6 6 6 0 0 0
43467- 0 0 0 0 0 0 0 0 0 0 0 0
43468- 0 0 0 0 0 0 0 0 0 0 0 0
43469- 0 0 0 0 0 0 0 0 0 0 0 0
43470- 0 0 0 0 0 0 0 0 0 0 0 0
43471- 0 0 0 0 0 0 0 0 0 0 0 0
43472- 0 0 0 0 0 0 0 0 0 0 0 0
43473- 0 0 0 0 0 0 0 0 0 0 0 0
43474- 0 0 0 0 0 0 0 0 0 0 0 0
43475- 0 0 0 0 0 0 0 0 0 0 0 0
43476- 0 0 0 0 0 0 0 0 0 0 0 0
43477- 0 0 0 0 0 0 0 0 0 0 0 0
43478- 0 0 0 0 0 0 10 10 10 34 34 34
43479- 74 74 74 74 74 74 2 2 6 6 6 6
43480-144 144 144 198 198 198 190 190 190 178 166 146
43481-154 121 60 156 107 11 156 107 11 168 124 44
43482-174 154 114 187 187 187 190 190 190 210 210 210
43483-246 246 246 253 253 253 253 253 253 182 182 182
43484- 6 6 6 2 2 6 2 2 6 2 2 6
43485- 2 2 6 2 2 6 2 2 6 62 62 62
43486- 74 74 74 34 34 34 14 14 14 0 0 0
43487- 0 0 0 0 0 0 0 0 0 0 0 0
43488- 0 0 0 0 0 0 0 0 0 0 0 0
43489- 0 0 0 0 0 0 0 0 0 0 0 0
43490- 0 0 0 0 0 0 0 0 0 0 0 0
43491- 0 0 0 0 0 0 0 0 0 0 0 0
43492- 0 0 0 0 0 0 0 0 0 0 0 0
43493- 0 0 0 0 0 0 0 0 0 0 0 0
43494- 0 0 0 0 0 0 0 0 0 0 0 0
43495- 0 0 0 0 0 0 0 0 0 0 0 0
43496- 0 0 0 0 0 0 0 0 0 0 0 0
43497- 0 0 0 0 0 0 0 0 0 0 0 0
43498- 0 0 0 10 10 10 22 22 22 54 54 54
43499- 94 94 94 18 18 18 2 2 6 46 46 46
43500-234 234 234 221 221 221 190 190 190 190 190 190
43501-190 190 190 187 187 187 187 187 187 190 190 190
43502-190 190 190 195 195 195 214 214 214 242 242 242
43503-253 253 253 253 253 253 253 253 253 253 253 253
43504- 82 82 82 2 2 6 2 2 6 2 2 6
43505- 2 2 6 2 2 6 2 2 6 14 14 14
43506- 86 86 86 54 54 54 22 22 22 6 6 6
43507- 0 0 0 0 0 0 0 0 0 0 0 0
43508- 0 0 0 0 0 0 0 0 0 0 0 0
43509- 0 0 0 0 0 0 0 0 0 0 0 0
43510- 0 0 0 0 0 0 0 0 0 0 0 0
43511- 0 0 0 0 0 0 0 0 0 0 0 0
43512- 0 0 0 0 0 0 0 0 0 0 0 0
43513- 0 0 0 0 0 0 0 0 0 0 0 0
43514- 0 0 0 0 0 0 0 0 0 0 0 0
43515- 0 0 0 0 0 0 0 0 0 0 0 0
43516- 0 0 0 0 0 0 0 0 0 0 0 0
43517- 0 0 0 0 0 0 0 0 0 0 0 0
43518- 6 6 6 18 18 18 46 46 46 90 90 90
43519- 46 46 46 18 18 18 6 6 6 182 182 182
43520-253 253 253 246 246 246 206 206 206 190 190 190
43521-190 190 190 190 190 190 190 190 190 190 190 190
43522-206 206 206 231 231 231 250 250 250 253 253 253
43523-253 253 253 253 253 253 253 253 253 253 253 253
43524-202 202 202 14 14 14 2 2 6 2 2 6
43525- 2 2 6 2 2 6 2 2 6 2 2 6
43526- 42 42 42 86 86 86 42 42 42 18 18 18
43527- 6 6 6 0 0 0 0 0 0 0 0 0
43528- 0 0 0 0 0 0 0 0 0 0 0 0
43529- 0 0 0 0 0 0 0 0 0 0 0 0
43530- 0 0 0 0 0 0 0 0 0 0 0 0
43531- 0 0 0 0 0 0 0 0 0 0 0 0
43532- 0 0 0 0 0 0 0 0 0 0 0 0
43533- 0 0 0 0 0 0 0 0 0 0 0 0
43534- 0 0 0 0 0 0 0 0 0 0 0 0
43535- 0 0 0 0 0 0 0 0 0 0 0 0
43536- 0 0 0 0 0 0 0 0 0 0 0 0
43537- 0 0 0 0 0 0 0 0 0 6 6 6
43538- 14 14 14 38 38 38 74 74 74 66 66 66
43539- 2 2 6 6 6 6 90 90 90 250 250 250
43540-253 253 253 253 253 253 238 238 238 198 198 198
43541-190 190 190 190 190 190 195 195 195 221 221 221
43542-246 246 246 253 253 253 253 253 253 253 253 253
43543-253 253 253 253 253 253 253 253 253 253 253 253
43544-253 253 253 82 82 82 2 2 6 2 2 6
43545- 2 2 6 2 2 6 2 2 6 2 2 6
43546- 2 2 6 78 78 78 70 70 70 34 34 34
43547- 14 14 14 6 6 6 0 0 0 0 0 0
43548- 0 0 0 0 0 0 0 0 0 0 0 0
43549- 0 0 0 0 0 0 0 0 0 0 0 0
43550- 0 0 0 0 0 0 0 0 0 0 0 0
43551- 0 0 0 0 0 0 0 0 0 0 0 0
43552- 0 0 0 0 0 0 0 0 0 0 0 0
43553- 0 0 0 0 0 0 0 0 0 0 0 0
43554- 0 0 0 0 0 0 0 0 0 0 0 0
43555- 0 0 0 0 0 0 0 0 0 0 0 0
43556- 0 0 0 0 0 0 0 0 0 0 0 0
43557- 0 0 0 0 0 0 0 0 0 14 14 14
43558- 34 34 34 66 66 66 78 78 78 6 6 6
43559- 2 2 6 18 18 18 218 218 218 253 253 253
43560-253 253 253 253 253 253 253 253 253 246 246 246
43561-226 226 226 231 231 231 246 246 246 253 253 253
43562-253 253 253 253 253 253 253 253 253 253 253 253
43563-253 253 253 253 253 253 253 253 253 253 253 253
43564-253 253 253 178 178 178 2 2 6 2 2 6
43565- 2 2 6 2 2 6 2 2 6 2 2 6
43566- 2 2 6 18 18 18 90 90 90 62 62 62
43567- 30 30 30 10 10 10 0 0 0 0 0 0
43568- 0 0 0 0 0 0 0 0 0 0 0 0
43569- 0 0 0 0 0 0 0 0 0 0 0 0
43570- 0 0 0 0 0 0 0 0 0 0 0 0
43571- 0 0 0 0 0 0 0 0 0 0 0 0
43572- 0 0 0 0 0 0 0 0 0 0 0 0
43573- 0 0 0 0 0 0 0 0 0 0 0 0
43574- 0 0 0 0 0 0 0 0 0 0 0 0
43575- 0 0 0 0 0 0 0 0 0 0 0 0
43576- 0 0 0 0 0 0 0 0 0 0 0 0
43577- 0 0 0 0 0 0 10 10 10 26 26 26
43578- 58 58 58 90 90 90 18 18 18 2 2 6
43579- 2 2 6 110 110 110 253 253 253 253 253 253
43580-253 253 253 253 253 253 253 253 253 253 253 253
43581-250 250 250 253 253 253 253 253 253 253 253 253
43582-253 253 253 253 253 253 253 253 253 253 253 253
43583-253 253 253 253 253 253 253 253 253 253 253 253
43584-253 253 253 231 231 231 18 18 18 2 2 6
43585- 2 2 6 2 2 6 2 2 6 2 2 6
43586- 2 2 6 2 2 6 18 18 18 94 94 94
43587- 54 54 54 26 26 26 10 10 10 0 0 0
43588- 0 0 0 0 0 0 0 0 0 0 0 0
43589- 0 0 0 0 0 0 0 0 0 0 0 0
43590- 0 0 0 0 0 0 0 0 0 0 0 0
43591- 0 0 0 0 0 0 0 0 0 0 0 0
43592- 0 0 0 0 0 0 0 0 0 0 0 0
43593- 0 0 0 0 0 0 0 0 0 0 0 0
43594- 0 0 0 0 0 0 0 0 0 0 0 0
43595- 0 0 0 0 0 0 0 0 0 0 0 0
43596- 0 0 0 0 0 0 0 0 0 0 0 0
43597- 0 0 0 6 6 6 22 22 22 50 50 50
43598- 90 90 90 26 26 26 2 2 6 2 2 6
43599- 14 14 14 195 195 195 250 250 250 253 253 253
43600-253 253 253 253 253 253 253 253 253 253 253 253
43601-253 253 253 253 253 253 253 253 253 253 253 253
43602-253 253 253 253 253 253 253 253 253 253 253 253
43603-253 253 253 253 253 253 253 253 253 253 253 253
43604-250 250 250 242 242 242 54 54 54 2 2 6
43605- 2 2 6 2 2 6 2 2 6 2 2 6
43606- 2 2 6 2 2 6 2 2 6 38 38 38
43607- 86 86 86 50 50 50 22 22 22 6 6 6
43608- 0 0 0 0 0 0 0 0 0 0 0 0
43609- 0 0 0 0 0 0 0 0 0 0 0 0
43610- 0 0 0 0 0 0 0 0 0 0 0 0
43611- 0 0 0 0 0 0 0 0 0 0 0 0
43612- 0 0 0 0 0 0 0 0 0 0 0 0
43613- 0 0 0 0 0 0 0 0 0 0 0 0
43614- 0 0 0 0 0 0 0 0 0 0 0 0
43615- 0 0 0 0 0 0 0 0 0 0 0 0
43616- 0 0 0 0 0 0 0 0 0 0 0 0
43617- 6 6 6 14 14 14 38 38 38 82 82 82
43618- 34 34 34 2 2 6 2 2 6 2 2 6
43619- 42 42 42 195 195 195 246 246 246 253 253 253
43620-253 253 253 253 253 253 253 253 253 250 250 250
43621-242 242 242 242 242 242 250 250 250 253 253 253
43622-253 253 253 253 253 253 253 253 253 253 253 253
43623-253 253 253 250 250 250 246 246 246 238 238 238
43624-226 226 226 231 231 231 101 101 101 6 6 6
43625- 2 2 6 2 2 6 2 2 6 2 2 6
43626- 2 2 6 2 2 6 2 2 6 2 2 6
43627- 38 38 38 82 82 82 42 42 42 14 14 14
43628- 6 6 6 0 0 0 0 0 0 0 0 0
43629- 0 0 0 0 0 0 0 0 0 0 0 0
43630- 0 0 0 0 0 0 0 0 0 0 0 0
43631- 0 0 0 0 0 0 0 0 0 0 0 0
43632- 0 0 0 0 0 0 0 0 0 0 0 0
43633- 0 0 0 0 0 0 0 0 0 0 0 0
43634- 0 0 0 0 0 0 0 0 0 0 0 0
43635- 0 0 0 0 0 0 0 0 0 0 0 0
43636- 0 0 0 0 0 0 0 0 0 0 0 0
43637- 10 10 10 26 26 26 62 62 62 66 66 66
43638- 2 2 6 2 2 6 2 2 6 6 6 6
43639- 70 70 70 170 170 170 206 206 206 234 234 234
43640-246 246 246 250 250 250 250 250 250 238 238 238
43641-226 226 226 231 231 231 238 238 238 250 250 250
43642-250 250 250 250 250 250 246 246 246 231 231 231
43643-214 214 214 206 206 206 202 202 202 202 202 202
43644-198 198 198 202 202 202 182 182 182 18 18 18
43645- 2 2 6 2 2 6 2 2 6 2 2 6
43646- 2 2 6 2 2 6 2 2 6 2 2 6
43647- 2 2 6 62 62 62 66 66 66 30 30 30
43648- 10 10 10 0 0 0 0 0 0 0 0 0
43649- 0 0 0 0 0 0 0 0 0 0 0 0
43650- 0 0 0 0 0 0 0 0 0 0 0 0
43651- 0 0 0 0 0 0 0 0 0 0 0 0
43652- 0 0 0 0 0 0 0 0 0 0 0 0
43653- 0 0 0 0 0 0 0 0 0 0 0 0
43654- 0 0 0 0 0 0 0 0 0 0 0 0
43655- 0 0 0 0 0 0 0 0 0 0 0 0
43656- 0 0 0 0 0 0 0 0 0 0 0 0
43657- 14 14 14 42 42 42 82 82 82 18 18 18
43658- 2 2 6 2 2 6 2 2 6 10 10 10
43659- 94 94 94 182 182 182 218 218 218 242 242 242
43660-250 250 250 253 253 253 253 253 253 250 250 250
43661-234 234 234 253 253 253 253 253 253 253 253 253
43662-253 253 253 253 253 253 253 253 253 246 246 246
43663-238 238 238 226 226 226 210 210 210 202 202 202
43664-195 195 195 195 195 195 210 210 210 158 158 158
43665- 6 6 6 14 14 14 50 50 50 14 14 14
43666- 2 2 6 2 2 6 2 2 6 2 2 6
43667- 2 2 6 6 6 6 86 86 86 46 46 46
43668- 18 18 18 6 6 6 0 0 0 0 0 0
43669- 0 0 0 0 0 0 0 0 0 0 0 0
43670- 0 0 0 0 0 0 0 0 0 0 0 0
43671- 0 0 0 0 0 0 0 0 0 0 0 0
43672- 0 0 0 0 0 0 0 0 0 0 0 0
43673- 0 0 0 0 0 0 0 0 0 0 0 0
43674- 0 0 0 0 0 0 0 0 0 0 0 0
43675- 0 0 0 0 0 0 0 0 0 0 0 0
43676- 0 0 0 0 0 0 0 0 0 6 6 6
43677- 22 22 22 54 54 54 70 70 70 2 2 6
43678- 2 2 6 10 10 10 2 2 6 22 22 22
43679-166 166 166 231 231 231 250 250 250 253 253 253
43680-253 253 253 253 253 253 253 253 253 250 250 250
43681-242 242 242 253 253 253 253 253 253 253 253 253
43682-253 253 253 253 253 253 253 253 253 253 253 253
43683-253 253 253 253 253 253 253 253 253 246 246 246
43684-231 231 231 206 206 206 198 198 198 226 226 226
43685- 94 94 94 2 2 6 6 6 6 38 38 38
43686- 30 30 30 2 2 6 2 2 6 2 2 6
43687- 2 2 6 2 2 6 62 62 62 66 66 66
43688- 26 26 26 10 10 10 0 0 0 0 0 0
43689- 0 0 0 0 0 0 0 0 0 0 0 0
43690- 0 0 0 0 0 0 0 0 0 0 0 0
43691- 0 0 0 0 0 0 0 0 0 0 0 0
43692- 0 0 0 0 0 0 0 0 0 0 0 0
43693- 0 0 0 0 0 0 0 0 0 0 0 0
43694- 0 0 0 0 0 0 0 0 0 0 0 0
43695- 0 0 0 0 0 0 0 0 0 0 0 0
43696- 0 0 0 0 0 0 0 0 0 10 10 10
43697- 30 30 30 74 74 74 50 50 50 2 2 6
43698- 26 26 26 26 26 26 2 2 6 106 106 106
43699-238 238 238 253 253 253 253 253 253 253 253 253
43700-253 253 253 253 253 253 253 253 253 253 253 253
43701-253 253 253 253 253 253 253 253 253 253 253 253
43702-253 253 253 253 253 253 253 253 253 253 253 253
43703-253 253 253 253 253 253 253 253 253 253 253 253
43704-253 253 253 246 246 246 218 218 218 202 202 202
43705-210 210 210 14 14 14 2 2 6 2 2 6
43706- 30 30 30 22 22 22 2 2 6 2 2 6
43707- 2 2 6 2 2 6 18 18 18 86 86 86
43708- 42 42 42 14 14 14 0 0 0 0 0 0
43709- 0 0 0 0 0 0 0 0 0 0 0 0
43710- 0 0 0 0 0 0 0 0 0 0 0 0
43711- 0 0 0 0 0 0 0 0 0 0 0 0
43712- 0 0 0 0 0 0 0 0 0 0 0 0
43713- 0 0 0 0 0 0 0 0 0 0 0 0
43714- 0 0 0 0 0 0 0 0 0 0 0 0
43715- 0 0 0 0 0 0 0 0 0 0 0 0
43716- 0 0 0 0 0 0 0 0 0 14 14 14
43717- 42 42 42 90 90 90 22 22 22 2 2 6
43718- 42 42 42 2 2 6 18 18 18 218 218 218
43719-253 253 253 253 253 253 253 253 253 253 253 253
43720-253 253 253 253 253 253 253 253 253 253 253 253
43721-253 253 253 253 253 253 253 253 253 253 253 253
43722-253 253 253 253 253 253 253 253 253 253 253 253
43723-253 253 253 253 253 253 253 253 253 253 253 253
43724-253 253 253 253 253 253 250 250 250 221 221 221
43725-218 218 218 101 101 101 2 2 6 14 14 14
43726- 18 18 18 38 38 38 10 10 10 2 2 6
43727- 2 2 6 2 2 6 2 2 6 78 78 78
43728- 58 58 58 22 22 22 6 6 6 0 0 0
43729- 0 0 0 0 0 0 0 0 0 0 0 0
43730- 0 0 0 0 0 0 0 0 0 0 0 0
43731- 0 0 0 0 0 0 0 0 0 0 0 0
43732- 0 0 0 0 0 0 0 0 0 0 0 0
43733- 0 0 0 0 0 0 0 0 0 0 0 0
43734- 0 0 0 0 0 0 0 0 0 0 0 0
43735- 0 0 0 0 0 0 0 0 0 0 0 0
43736- 0 0 0 0 0 0 6 6 6 18 18 18
43737- 54 54 54 82 82 82 2 2 6 26 26 26
43738- 22 22 22 2 2 6 123 123 123 253 253 253
43739-253 253 253 253 253 253 253 253 253 253 253 253
43740-253 253 253 253 253 253 253 253 253 253 253 253
43741-253 253 253 253 253 253 253 253 253 253 253 253
43742-253 253 253 253 253 253 253 253 253 253 253 253
43743-253 253 253 253 253 253 253 253 253 253 253 253
43744-253 253 253 253 253 253 253 253 253 250 250 250
43745-238 238 238 198 198 198 6 6 6 38 38 38
43746- 58 58 58 26 26 26 38 38 38 2 2 6
43747- 2 2 6 2 2 6 2 2 6 46 46 46
43748- 78 78 78 30 30 30 10 10 10 0 0 0
43749- 0 0 0 0 0 0 0 0 0 0 0 0
43750- 0 0 0 0 0 0 0 0 0 0 0 0
43751- 0 0 0 0 0 0 0 0 0 0 0 0
43752- 0 0 0 0 0 0 0 0 0 0 0 0
43753- 0 0 0 0 0 0 0 0 0 0 0 0
43754- 0 0 0 0 0 0 0 0 0 0 0 0
43755- 0 0 0 0 0 0 0 0 0 0 0 0
43756- 0 0 0 0 0 0 10 10 10 30 30 30
43757- 74 74 74 58 58 58 2 2 6 42 42 42
43758- 2 2 6 22 22 22 231 231 231 253 253 253
43759-253 253 253 253 253 253 253 253 253 253 253 253
43760-253 253 253 253 253 253 253 253 253 250 250 250
43761-253 253 253 253 253 253 253 253 253 253 253 253
43762-253 253 253 253 253 253 253 253 253 253 253 253
43763-253 253 253 253 253 253 253 253 253 253 253 253
43764-253 253 253 253 253 253 253 253 253 253 253 253
43765-253 253 253 246 246 246 46 46 46 38 38 38
43766- 42 42 42 14 14 14 38 38 38 14 14 14
43767- 2 2 6 2 2 6 2 2 6 6 6 6
43768- 86 86 86 46 46 46 14 14 14 0 0 0
43769- 0 0 0 0 0 0 0 0 0 0 0 0
43770- 0 0 0 0 0 0 0 0 0 0 0 0
43771- 0 0 0 0 0 0 0 0 0 0 0 0
43772- 0 0 0 0 0 0 0 0 0 0 0 0
43773- 0 0 0 0 0 0 0 0 0 0 0 0
43774- 0 0 0 0 0 0 0 0 0 0 0 0
43775- 0 0 0 0 0 0 0 0 0 0 0 0
43776- 0 0 0 6 6 6 14 14 14 42 42 42
43777- 90 90 90 18 18 18 18 18 18 26 26 26
43778- 2 2 6 116 116 116 253 253 253 253 253 253
43779-253 253 253 253 253 253 253 253 253 253 253 253
43780-253 253 253 253 253 253 250 250 250 238 238 238
43781-253 253 253 253 253 253 253 253 253 253 253 253
43782-253 253 253 253 253 253 253 253 253 253 253 253
43783-253 253 253 253 253 253 253 253 253 253 253 253
43784-253 253 253 253 253 253 253 253 253 253 253 253
43785-253 253 253 253 253 253 94 94 94 6 6 6
43786- 2 2 6 2 2 6 10 10 10 34 34 34
43787- 2 2 6 2 2 6 2 2 6 2 2 6
43788- 74 74 74 58 58 58 22 22 22 6 6 6
43789- 0 0 0 0 0 0 0 0 0 0 0 0
43790- 0 0 0 0 0 0 0 0 0 0 0 0
43791- 0 0 0 0 0 0 0 0 0 0 0 0
43792- 0 0 0 0 0 0 0 0 0 0 0 0
43793- 0 0 0 0 0 0 0 0 0 0 0 0
43794- 0 0 0 0 0 0 0 0 0 0 0 0
43795- 0 0 0 0 0 0 0 0 0 0 0 0
43796- 0 0 0 10 10 10 26 26 26 66 66 66
43797- 82 82 82 2 2 6 38 38 38 6 6 6
43798- 14 14 14 210 210 210 253 253 253 253 253 253
43799-253 253 253 253 253 253 253 253 253 253 253 253
43800-253 253 253 253 253 253 246 246 246 242 242 242
43801-253 253 253 253 253 253 253 253 253 253 253 253
43802-253 253 253 253 253 253 253 253 253 253 253 253
43803-253 253 253 253 253 253 253 253 253 253 253 253
43804-253 253 253 253 253 253 253 253 253 253 253 253
43805-253 253 253 253 253 253 144 144 144 2 2 6
43806- 2 2 6 2 2 6 2 2 6 46 46 46
43807- 2 2 6 2 2 6 2 2 6 2 2 6
43808- 42 42 42 74 74 74 30 30 30 10 10 10
43809- 0 0 0 0 0 0 0 0 0 0 0 0
43810- 0 0 0 0 0 0 0 0 0 0 0 0
43811- 0 0 0 0 0 0 0 0 0 0 0 0
43812- 0 0 0 0 0 0 0 0 0 0 0 0
43813- 0 0 0 0 0 0 0 0 0 0 0 0
43814- 0 0 0 0 0 0 0 0 0 0 0 0
43815- 0 0 0 0 0 0 0 0 0 0 0 0
43816- 6 6 6 14 14 14 42 42 42 90 90 90
43817- 26 26 26 6 6 6 42 42 42 2 2 6
43818- 74 74 74 250 250 250 253 253 253 253 253 253
43819-253 253 253 253 253 253 253 253 253 253 253 253
43820-253 253 253 253 253 253 242 242 242 242 242 242
43821-253 253 253 253 253 253 253 253 253 253 253 253
43822-253 253 253 253 253 253 253 253 253 253 253 253
43823-253 253 253 253 253 253 253 253 253 253 253 253
43824-253 253 253 253 253 253 253 253 253 253 253 253
43825-253 253 253 253 253 253 182 182 182 2 2 6
43826- 2 2 6 2 2 6 2 2 6 46 46 46
43827- 2 2 6 2 2 6 2 2 6 2 2 6
43828- 10 10 10 86 86 86 38 38 38 10 10 10
43829- 0 0 0 0 0 0 0 0 0 0 0 0
43830- 0 0 0 0 0 0 0 0 0 0 0 0
43831- 0 0 0 0 0 0 0 0 0 0 0 0
43832- 0 0 0 0 0 0 0 0 0 0 0 0
43833- 0 0 0 0 0 0 0 0 0 0 0 0
43834- 0 0 0 0 0 0 0 0 0 0 0 0
43835- 0 0 0 0 0 0 0 0 0 0 0 0
43836- 10 10 10 26 26 26 66 66 66 82 82 82
43837- 2 2 6 22 22 22 18 18 18 2 2 6
43838-149 149 149 253 253 253 253 253 253 253 253 253
43839-253 253 253 253 253 253 253 253 253 253 253 253
43840-253 253 253 253 253 253 234 234 234 242 242 242
43841-253 253 253 253 253 253 253 253 253 253 253 253
43842-253 253 253 253 253 253 253 253 253 253 253 253
43843-253 253 253 253 253 253 253 253 253 253 253 253
43844-253 253 253 253 253 253 253 253 253 253 253 253
43845-253 253 253 253 253 253 206 206 206 2 2 6
43846- 2 2 6 2 2 6 2 2 6 38 38 38
43847- 2 2 6 2 2 6 2 2 6 2 2 6
43848- 6 6 6 86 86 86 46 46 46 14 14 14
43849- 0 0 0 0 0 0 0 0 0 0 0 0
43850- 0 0 0 0 0 0 0 0 0 0 0 0
43851- 0 0 0 0 0 0 0 0 0 0 0 0
43852- 0 0 0 0 0 0 0 0 0 0 0 0
43853- 0 0 0 0 0 0 0 0 0 0 0 0
43854- 0 0 0 0 0 0 0 0 0 0 0 0
43855- 0 0 0 0 0 0 0 0 0 6 6 6
43856- 18 18 18 46 46 46 86 86 86 18 18 18
43857- 2 2 6 34 34 34 10 10 10 6 6 6
43858-210 210 210 253 253 253 253 253 253 253 253 253
43859-253 253 253 253 253 253 253 253 253 253 253 253
43860-253 253 253 253 253 253 234 234 234 242 242 242
43861-253 253 253 253 253 253 253 253 253 253 253 253
43862-253 253 253 253 253 253 253 253 253 253 253 253
43863-253 253 253 253 253 253 253 253 253 253 253 253
43864-253 253 253 253 253 253 253 253 253 253 253 253
43865-253 253 253 253 253 253 221 221 221 6 6 6
43866- 2 2 6 2 2 6 6 6 6 30 30 30
43867- 2 2 6 2 2 6 2 2 6 2 2 6
43868- 2 2 6 82 82 82 54 54 54 18 18 18
43869- 6 6 6 0 0 0 0 0 0 0 0 0
43870- 0 0 0 0 0 0 0 0 0 0 0 0
43871- 0 0 0 0 0 0 0 0 0 0 0 0
43872- 0 0 0 0 0 0 0 0 0 0 0 0
43873- 0 0 0 0 0 0 0 0 0 0 0 0
43874- 0 0 0 0 0 0 0 0 0 0 0 0
43875- 0 0 0 0 0 0 0 0 0 10 10 10
43876- 26 26 26 66 66 66 62 62 62 2 2 6
43877- 2 2 6 38 38 38 10 10 10 26 26 26
43878-238 238 238 253 253 253 253 253 253 253 253 253
43879-253 253 253 253 253 253 253 253 253 253 253 253
43880-253 253 253 253 253 253 231 231 231 238 238 238
43881-253 253 253 253 253 253 253 253 253 253 253 253
43882-253 253 253 253 253 253 253 253 253 253 253 253
43883-253 253 253 253 253 253 253 253 253 253 253 253
43884-253 253 253 253 253 253 253 253 253 253 253 253
43885-253 253 253 253 253 253 231 231 231 6 6 6
43886- 2 2 6 2 2 6 10 10 10 30 30 30
43887- 2 2 6 2 2 6 2 2 6 2 2 6
43888- 2 2 6 66 66 66 58 58 58 22 22 22
43889- 6 6 6 0 0 0 0 0 0 0 0 0
43890- 0 0 0 0 0 0 0 0 0 0 0 0
43891- 0 0 0 0 0 0 0 0 0 0 0 0
43892- 0 0 0 0 0 0 0 0 0 0 0 0
43893- 0 0 0 0 0 0 0 0 0 0 0 0
43894- 0 0 0 0 0 0 0 0 0 0 0 0
43895- 0 0 0 0 0 0 0 0 0 10 10 10
43896- 38 38 38 78 78 78 6 6 6 2 2 6
43897- 2 2 6 46 46 46 14 14 14 42 42 42
43898-246 246 246 253 253 253 253 253 253 253 253 253
43899-253 253 253 253 253 253 253 253 253 253 253 253
43900-253 253 253 253 253 253 231 231 231 242 242 242
43901-253 253 253 253 253 253 253 253 253 253 253 253
43902-253 253 253 253 253 253 253 253 253 253 253 253
43903-253 253 253 253 253 253 253 253 253 253 253 253
43904-253 253 253 253 253 253 253 253 253 253 253 253
43905-253 253 253 253 253 253 234 234 234 10 10 10
43906- 2 2 6 2 2 6 22 22 22 14 14 14
43907- 2 2 6 2 2 6 2 2 6 2 2 6
43908- 2 2 6 66 66 66 62 62 62 22 22 22
43909- 6 6 6 0 0 0 0 0 0 0 0 0
43910- 0 0 0 0 0 0 0 0 0 0 0 0
43911- 0 0 0 0 0 0 0 0 0 0 0 0
43912- 0 0 0 0 0 0 0 0 0 0 0 0
43913- 0 0 0 0 0 0 0 0 0 0 0 0
43914- 0 0 0 0 0 0 0 0 0 0 0 0
43915- 0 0 0 0 0 0 6 6 6 18 18 18
43916- 50 50 50 74 74 74 2 2 6 2 2 6
43917- 14 14 14 70 70 70 34 34 34 62 62 62
43918-250 250 250 253 253 253 253 253 253 253 253 253
43919-253 253 253 253 253 253 253 253 253 253 253 253
43920-253 253 253 253 253 253 231 231 231 246 246 246
43921-253 253 253 253 253 253 253 253 253 253 253 253
43922-253 253 253 253 253 253 253 253 253 253 253 253
43923-253 253 253 253 253 253 253 253 253 253 253 253
43924-253 253 253 253 253 253 253 253 253 253 253 253
43925-253 253 253 253 253 253 234 234 234 14 14 14
43926- 2 2 6 2 2 6 30 30 30 2 2 6
43927- 2 2 6 2 2 6 2 2 6 2 2 6
43928- 2 2 6 66 66 66 62 62 62 22 22 22
43929- 6 6 6 0 0 0 0 0 0 0 0 0
43930- 0 0 0 0 0 0 0 0 0 0 0 0
43931- 0 0 0 0 0 0 0 0 0 0 0 0
43932- 0 0 0 0 0 0 0 0 0 0 0 0
43933- 0 0 0 0 0 0 0 0 0 0 0 0
43934- 0 0 0 0 0 0 0 0 0 0 0 0
43935- 0 0 0 0 0 0 6 6 6 18 18 18
43936- 54 54 54 62 62 62 2 2 6 2 2 6
43937- 2 2 6 30 30 30 46 46 46 70 70 70
43938-250 250 250 253 253 253 253 253 253 253 253 253
43939-253 253 253 253 253 253 253 253 253 253 253 253
43940-253 253 253 253 253 253 231 231 231 246 246 246
43941-253 253 253 253 253 253 253 253 253 253 253 253
43942-253 253 253 253 253 253 253 253 253 253 253 253
43943-253 253 253 253 253 253 253 253 253 253 253 253
43944-253 253 253 253 253 253 253 253 253 253 253 253
43945-253 253 253 253 253 253 226 226 226 10 10 10
43946- 2 2 6 6 6 6 30 30 30 2 2 6
43947- 2 2 6 2 2 6 2 2 6 2 2 6
43948- 2 2 6 66 66 66 58 58 58 22 22 22
43949- 6 6 6 0 0 0 0 0 0 0 0 0
43950- 0 0 0 0 0 0 0 0 0 0 0 0
43951- 0 0 0 0 0 0 0 0 0 0 0 0
43952- 0 0 0 0 0 0 0 0 0 0 0 0
43953- 0 0 0 0 0 0 0 0 0 0 0 0
43954- 0 0 0 0 0 0 0 0 0 0 0 0
43955- 0 0 0 0 0 0 6 6 6 22 22 22
43956- 58 58 58 62 62 62 2 2 6 2 2 6
43957- 2 2 6 2 2 6 30 30 30 78 78 78
43958-250 250 250 253 253 253 253 253 253 253 253 253
43959-253 253 253 253 253 253 253 253 253 253 253 253
43960-253 253 253 253 253 253 231 231 231 246 246 246
43961-253 253 253 253 253 253 253 253 253 253 253 253
43962-253 253 253 253 253 253 253 253 253 253 253 253
43963-253 253 253 253 253 253 253 253 253 253 253 253
43964-253 253 253 253 253 253 253 253 253 253 253 253
43965-253 253 253 253 253 253 206 206 206 2 2 6
43966- 22 22 22 34 34 34 18 14 6 22 22 22
43967- 26 26 26 18 18 18 6 6 6 2 2 6
43968- 2 2 6 82 82 82 54 54 54 18 18 18
43969- 6 6 6 0 0 0 0 0 0 0 0 0
43970- 0 0 0 0 0 0 0 0 0 0 0 0
43971- 0 0 0 0 0 0 0 0 0 0 0 0
43972- 0 0 0 0 0 0 0 0 0 0 0 0
43973- 0 0 0 0 0 0 0 0 0 0 0 0
43974- 0 0 0 0 0 0 0 0 0 0 0 0
43975- 0 0 0 0 0 0 6 6 6 26 26 26
43976- 62 62 62 106 106 106 74 54 14 185 133 11
43977-210 162 10 121 92 8 6 6 6 62 62 62
43978-238 238 238 253 253 253 253 253 253 253 253 253
43979-253 253 253 253 253 253 253 253 253 253 253 253
43980-253 253 253 253 253 253 231 231 231 246 246 246
43981-253 253 253 253 253 253 253 253 253 253 253 253
43982-253 253 253 253 253 253 253 253 253 253 253 253
43983-253 253 253 253 253 253 253 253 253 253 253 253
43984-253 253 253 253 253 253 253 253 253 253 253 253
43985-253 253 253 253 253 253 158 158 158 18 18 18
43986- 14 14 14 2 2 6 2 2 6 2 2 6
43987- 6 6 6 18 18 18 66 66 66 38 38 38
43988- 6 6 6 94 94 94 50 50 50 18 18 18
43989- 6 6 6 0 0 0 0 0 0 0 0 0
43990- 0 0 0 0 0 0 0 0 0 0 0 0
43991- 0 0 0 0 0 0 0 0 0 0 0 0
43992- 0 0 0 0 0 0 0 0 0 0 0 0
43993- 0 0 0 0 0 0 0 0 0 0 0 0
43994- 0 0 0 0 0 0 0 0 0 6 6 6
43995- 10 10 10 10 10 10 18 18 18 38 38 38
43996- 78 78 78 142 134 106 216 158 10 242 186 14
43997-246 190 14 246 190 14 156 118 10 10 10 10
43998- 90 90 90 238 238 238 253 253 253 253 253 253
43999-253 253 253 253 253 253 253 253 253 253 253 253
44000-253 253 253 253 253 253 231 231 231 250 250 250
44001-253 253 253 253 253 253 253 253 253 253 253 253
44002-253 253 253 253 253 253 253 253 253 253 253 253
44003-253 253 253 253 253 253 253 253 253 253 253 253
44004-253 253 253 253 253 253 253 253 253 246 230 190
44005-238 204 91 238 204 91 181 142 44 37 26 9
44006- 2 2 6 2 2 6 2 2 6 2 2 6
44007- 2 2 6 2 2 6 38 38 38 46 46 46
44008- 26 26 26 106 106 106 54 54 54 18 18 18
44009- 6 6 6 0 0 0 0 0 0 0 0 0
44010- 0 0 0 0 0 0 0 0 0 0 0 0
44011- 0 0 0 0 0 0 0 0 0 0 0 0
44012- 0 0 0 0 0 0 0 0 0 0 0 0
44013- 0 0 0 0 0 0 0 0 0 0 0 0
44014- 0 0 0 6 6 6 14 14 14 22 22 22
44015- 30 30 30 38 38 38 50 50 50 70 70 70
44016-106 106 106 190 142 34 226 170 11 242 186 14
44017-246 190 14 246 190 14 246 190 14 154 114 10
44018- 6 6 6 74 74 74 226 226 226 253 253 253
44019-253 253 253 253 253 253 253 253 253 253 253 253
44020-253 253 253 253 253 253 231 231 231 250 250 250
44021-253 253 253 253 253 253 253 253 253 253 253 253
44022-253 253 253 253 253 253 253 253 253 253 253 253
44023-253 253 253 253 253 253 253 253 253 253 253 253
44024-253 253 253 253 253 253 253 253 253 228 184 62
44025-241 196 14 241 208 19 232 195 16 38 30 10
44026- 2 2 6 2 2 6 2 2 6 2 2 6
44027- 2 2 6 6 6 6 30 30 30 26 26 26
44028-203 166 17 154 142 90 66 66 66 26 26 26
44029- 6 6 6 0 0 0 0 0 0 0 0 0
44030- 0 0 0 0 0 0 0 0 0 0 0 0
44031- 0 0 0 0 0 0 0 0 0 0 0 0
44032- 0 0 0 0 0 0 0 0 0 0 0 0
44033- 0 0 0 0 0 0 0 0 0 0 0 0
44034- 6 6 6 18 18 18 38 38 38 58 58 58
44035- 78 78 78 86 86 86 101 101 101 123 123 123
44036-175 146 61 210 150 10 234 174 13 246 186 14
44037-246 190 14 246 190 14 246 190 14 238 190 10
44038-102 78 10 2 2 6 46 46 46 198 198 198
44039-253 253 253 253 253 253 253 253 253 253 253 253
44040-253 253 253 253 253 253 234 234 234 242 242 242
44041-253 253 253 253 253 253 253 253 253 253 253 253
44042-253 253 253 253 253 253 253 253 253 253 253 253
44043-253 253 253 253 253 253 253 253 253 253 253 253
44044-253 253 253 253 253 253 253 253 253 224 178 62
44045-242 186 14 241 196 14 210 166 10 22 18 6
44046- 2 2 6 2 2 6 2 2 6 2 2 6
44047- 2 2 6 2 2 6 6 6 6 121 92 8
44048-238 202 15 232 195 16 82 82 82 34 34 34
44049- 10 10 10 0 0 0 0 0 0 0 0 0
44050- 0 0 0 0 0 0 0 0 0 0 0 0
44051- 0 0 0 0 0 0 0 0 0 0 0 0
44052- 0 0 0 0 0 0 0 0 0 0 0 0
44053- 0 0 0 0 0 0 0 0 0 0 0 0
44054- 14 14 14 38 38 38 70 70 70 154 122 46
44055-190 142 34 200 144 11 197 138 11 197 138 11
44056-213 154 11 226 170 11 242 186 14 246 190 14
44057-246 190 14 246 190 14 246 190 14 246 190 14
44058-225 175 15 46 32 6 2 2 6 22 22 22
44059-158 158 158 250 250 250 253 253 253 253 253 253
44060-253 253 253 253 253 253 253 253 253 253 253 253
44061-253 253 253 253 253 253 253 253 253 253 253 253
44062-253 253 253 253 253 253 253 253 253 253 253 253
44063-253 253 253 253 253 253 253 253 253 253 253 253
44064-253 253 253 250 250 250 242 242 242 224 178 62
44065-239 182 13 236 186 11 213 154 11 46 32 6
44066- 2 2 6 2 2 6 2 2 6 2 2 6
44067- 2 2 6 2 2 6 61 42 6 225 175 15
44068-238 190 10 236 186 11 112 100 78 42 42 42
44069- 14 14 14 0 0 0 0 0 0 0 0 0
44070- 0 0 0 0 0 0 0 0 0 0 0 0
44071- 0 0 0 0 0 0 0 0 0 0 0 0
44072- 0 0 0 0 0 0 0 0 0 0 0 0
44073- 0 0 0 0 0 0 0 0 0 6 6 6
44074- 22 22 22 54 54 54 154 122 46 213 154 11
44075-226 170 11 230 174 11 226 170 11 226 170 11
44076-236 178 12 242 186 14 246 190 14 246 190 14
44077-246 190 14 246 190 14 246 190 14 246 190 14
44078-241 196 14 184 144 12 10 10 10 2 2 6
44079- 6 6 6 116 116 116 242 242 242 253 253 253
44080-253 253 253 253 253 253 253 253 253 253 253 253
44081-253 253 253 253 253 253 253 253 253 253 253 253
44082-253 253 253 253 253 253 253 253 253 253 253 253
44083-253 253 253 253 253 253 253 253 253 253 253 253
44084-253 253 253 231 231 231 198 198 198 214 170 54
44085-236 178 12 236 178 12 210 150 10 137 92 6
44086- 18 14 6 2 2 6 2 2 6 2 2 6
44087- 6 6 6 70 47 6 200 144 11 236 178 12
44088-239 182 13 239 182 13 124 112 88 58 58 58
44089- 22 22 22 6 6 6 0 0 0 0 0 0
44090- 0 0 0 0 0 0 0 0 0 0 0 0
44091- 0 0 0 0 0 0 0 0 0 0 0 0
44092- 0 0 0 0 0 0 0 0 0 0 0 0
44093- 0 0 0 0 0 0 0 0 0 10 10 10
44094- 30 30 30 70 70 70 180 133 36 226 170 11
44095-239 182 13 242 186 14 242 186 14 246 186 14
44096-246 190 14 246 190 14 246 190 14 246 190 14
44097-246 190 14 246 190 14 246 190 14 246 190 14
44098-246 190 14 232 195 16 98 70 6 2 2 6
44099- 2 2 6 2 2 6 66 66 66 221 221 221
44100-253 253 253 253 253 253 253 253 253 253 253 253
44101-253 253 253 253 253 253 253 253 253 253 253 253
44102-253 253 253 253 253 253 253 253 253 253 253 253
44103-253 253 253 253 253 253 253 253 253 253 253 253
44104-253 253 253 206 206 206 198 198 198 214 166 58
44105-230 174 11 230 174 11 216 158 10 192 133 9
44106-163 110 8 116 81 8 102 78 10 116 81 8
44107-167 114 7 197 138 11 226 170 11 239 182 13
44108-242 186 14 242 186 14 162 146 94 78 78 78
44109- 34 34 34 14 14 14 6 6 6 0 0 0
44110- 0 0 0 0 0 0 0 0 0 0 0 0
44111- 0 0 0 0 0 0 0 0 0 0 0 0
44112- 0 0 0 0 0 0 0 0 0 0 0 0
44113- 0 0 0 0 0 0 0 0 0 6 6 6
44114- 30 30 30 78 78 78 190 142 34 226 170 11
44115-239 182 13 246 190 14 246 190 14 246 190 14
44116-246 190 14 246 190 14 246 190 14 246 190 14
44117-246 190 14 246 190 14 246 190 14 246 190 14
44118-246 190 14 241 196 14 203 166 17 22 18 6
44119- 2 2 6 2 2 6 2 2 6 38 38 38
44120-218 218 218 253 253 253 253 253 253 253 253 253
44121-253 253 253 253 253 253 253 253 253 253 253 253
44122-253 253 253 253 253 253 253 253 253 253 253 253
44123-253 253 253 253 253 253 253 253 253 253 253 253
44124-250 250 250 206 206 206 198 198 198 202 162 69
44125-226 170 11 236 178 12 224 166 10 210 150 10
44126-200 144 11 197 138 11 192 133 9 197 138 11
44127-210 150 10 226 170 11 242 186 14 246 190 14
44128-246 190 14 246 186 14 225 175 15 124 112 88
44129- 62 62 62 30 30 30 14 14 14 6 6 6
44130- 0 0 0 0 0 0 0 0 0 0 0 0
44131- 0 0 0 0 0 0 0 0 0 0 0 0
44132- 0 0 0 0 0 0 0 0 0 0 0 0
44133- 0 0 0 0 0 0 0 0 0 10 10 10
44134- 30 30 30 78 78 78 174 135 50 224 166 10
44135-239 182 13 246 190 14 246 190 14 246 190 14
44136-246 190 14 246 190 14 246 190 14 246 190 14
44137-246 190 14 246 190 14 246 190 14 246 190 14
44138-246 190 14 246 190 14 241 196 14 139 102 15
44139- 2 2 6 2 2 6 2 2 6 2 2 6
44140- 78 78 78 250 250 250 253 253 253 253 253 253
44141-253 253 253 253 253 253 253 253 253 253 253 253
44142-253 253 253 253 253 253 253 253 253 253 253 253
44143-253 253 253 253 253 253 253 253 253 253 253 253
44144-250 250 250 214 214 214 198 198 198 190 150 46
44145-219 162 10 236 178 12 234 174 13 224 166 10
44146-216 158 10 213 154 11 213 154 11 216 158 10
44147-226 170 11 239 182 13 246 190 14 246 190 14
44148-246 190 14 246 190 14 242 186 14 206 162 42
44149-101 101 101 58 58 58 30 30 30 14 14 14
44150- 6 6 6 0 0 0 0 0 0 0 0 0
44151- 0 0 0 0 0 0 0 0 0 0 0 0
44152- 0 0 0 0 0 0 0 0 0 0 0 0
44153- 0 0 0 0 0 0 0 0 0 10 10 10
44154- 30 30 30 74 74 74 174 135 50 216 158 10
44155-236 178 12 246 190 14 246 190 14 246 190 14
44156-246 190 14 246 190 14 246 190 14 246 190 14
44157-246 190 14 246 190 14 246 190 14 246 190 14
44158-246 190 14 246 190 14 241 196 14 226 184 13
44159- 61 42 6 2 2 6 2 2 6 2 2 6
44160- 22 22 22 238 238 238 253 253 253 253 253 253
44161-253 253 253 253 253 253 253 253 253 253 253 253
44162-253 253 253 253 253 253 253 253 253 253 253 253
44163-253 253 253 253 253 253 253 253 253 253 253 253
44164-253 253 253 226 226 226 187 187 187 180 133 36
44165-216 158 10 236 178 12 239 182 13 236 178 12
44166-230 174 11 226 170 11 226 170 11 230 174 11
44167-236 178 12 242 186 14 246 190 14 246 190 14
44168-246 190 14 246 190 14 246 186 14 239 182 13
44169-206 162 42 106 106 106 66 66 66 34 34 34
44170- 14 14 14 6 6 6 0 0 0 0 0 0
44171- 0 0 0 0 0 0 0 0 0 0 0 0
44172- 0 0 0 0 0 0 0 0 0 0 0 0
44173- 0 0 0 0 0 0 0 0 0 6 6 6
44174- 26 26 26 70 70 70 163 133 67 213 154 11
44175-236 178 12 246 190 14 246 190 14 246 190 14
44176-246 190 14 246 190 14 246 190 14 246 190 14
44177-246 190 14 246 190 14 246 190 14 246 190 14
44178-246 190 14 246 190 14 246 190 14 241 196 14
44179-190 146 13 18 14 6 2 2 6 2 2 6
44180- 46 46 46 246 246 246 253 253 253 253 253 253
44181-253 253 253 253 253 253 253 253 253 253 253 253
44182-253 253 253 253 253 253 253 253 253 253 253 253
44183-253 253 253 253 253 253 253 253 253 253 253 253
44184-253 253 253 221 221 221 86 86 86 156 107 11
44185-216 158 10 236 178 12 242 186 14 246 186 14
44186-242 186 14 239 182 13 239 182 13 242 186 14
44187-242 186 14 246 186 14 246 190 14 246 190 14
44188-246 190 14 246 190 14 246 190 14 246 190 14
44189-242 186 14 225 175 15 142 122 72 66 66 66
44190- 30 30 30 10 10 10 0 0 0 0 0 0
44191- 0 0 0 0 0 0 0 0 0 0 0 0
44192- 0 0 0 0 0 0 0 0 0 0 0 0
44193- 0 0 0 0 0 0 0 0 0 6 6 6
44194- 26 26 26 70 70 70 163 133 67 210 150 10
44195-236 178 12 246 190 14 246 190 14 246 190 14
44196-246 190 14 246 190 14 246 190 14 246 190 14
44197-246 190 14 246 190 14 246 190 14 246 190 14
44198-246 190 14 246 190 14 246 190 14 246 190 14
44199-232 195 16 121 92 8 34 34 34 106 106 106
44200-221 221 221 253 253 253 253 253 253 253 253 253
44201-253 253 253 253 253 253 253 253 253 253 253 253
44202-253 253 253 253 253 253 253 253 253 253 253 253
44203-253 253 253 253 253 253 253 253 253 253 253 253
44204-242 242 242 82 82 82 18 14 6 163 110 8
44205-216 158 10 236 178 12 242 186 14 246 190 14
44206-246 190 14 246 190 14 246 190 14 246 190 14
44207-246 190 14 246 190 14 246 190 14 246 190 14
44208-246 190 14 246 190 14 246 190 14 246 190 14
44209-246 190 14 246 190 14 242 186 14 163 133 67
44210- 46 46 46 18 18 18 6 6 6 0 0 0
44211- 0 0 0 0 0 0 0 0 0 0 0 0
44212- 0 0 0 0 0 0 0 0 0 0 0 0
44213- 0 0 0 0 0 0 0 0 0 10 10 10
44214- 30 30 30 78 78 78 163 133 67 210 150 10
44215-236 178 12 246 186 14 246 190 14 246 190 14
44216-246 190 14 246 190 14 246 190 14 246 190 14
44217-246 190 14 246 190 14 246 190 14 246 190 14
44218-246 190 14 246 190 14 246 190 14 246 190 14
44219-241 196 14 215 174 15 190 178 144 253 253 253
44220-253 253 253 253 253 253 253 253 253 253 253 253
44221-253 253 253 253 253 253 253 253 253 253 253 253
44222-253 253 253 253 253 253 253 253 253 253 253 253
44223-253 253 253 253 253 253 253 253 253 218 218 218
44224- 58 58 58 2 2 6 22 18 6 167 114 7
44225-216 158 10 236 178 12 246 186 14 246 190 14
44226-246 190 14 246 190 14 246 190 14 246 190 14
44227-246 190 14 246 190 14 246 190 14 246 190 14
44228-246 190 14 246 190 14 246 190 14 246 190 14
44229-246 190 14 246 186 14 242 186 14 190 150 46
44230- 54 54 54 22 22 22 6 6 6 0 0 0
44231- 0 0 0 0 0 0 0 0 0 0 0 0
44232- 0 0 0 0 0 0 0 0 0 0 0 0
44233- 0 0 0 0 0 0 0 0 0 14 14 14
44234- 38 38 38 86 86 86 180 133 36 213 154 11
44235-236 178 12 246 186 14 246 190 14 246 190 14
44236-246 190 14 246 190 14 246 190 14 246 190 14
44237-246 190 14 246 190 14 246 190 14 246 190 14
44238-246 190 14 246 190 14 246 190 14 246 190 14
44239-246 190 14 232 195 16 190 146 13 214 214 214
44240-253 253 253 253 253 253 253 253 253 253 253 253
44241-253 253 253 253 253 253 253 253 253 253 253 253
44242-253 253 253 253 253 253 253 253 253 253 253 253
44243-253 253 253 250 250 250 170 170 170 26 26 26
44244- 2 2 6 2 2 6 37 26 9 163 110 8
44245-219 162 10 239 182 13 246 186 14 246 190 14
44246-246 190 14 246 190 14 246 190 14 246 190 14
44247-246 190 14 246 190 14 246 190 14 246 190 14
44248-246 190 14 246 190 14 246 190 14 246 190 14
44249-246 186 14 236 178 12 224 166 10 142 122 72
44250- 46 46 46 18 18 18 6 6 6 0 0 0
44251- 0 0 0 0 0 0 0 0 0 0 0 0
44252- 0 0 0 0 0 0 0 0 0 0 0 0
44253- 0 0 0 0 0 0 6 6 6 18 18 18
44254- 50 50 50 109 106 95 192 133 9 224 166 10
44255-242 186 14 246 190 14 246 190 14 246 190 14
44256-246 190 14 246 190 14 246 190 14 246 190 14
44257-246 190 14 246 190 14 246 190 14 246 190 14
44258-246 190 14 246 190 14 246 190 14 246 190 14
44259-242 186 14 226 184 13 210 162 10 142 110 46
44260-226 226 226 253 253 253 253 253 253 253 253 253
44261-253 253 253 253 253 253 253 253 253 253 253 253
44262-253 253 253 253 253 253 253 253 253 253 253 253
44263-198 198 198 66 66 66 2 2 6 2 2 6
44264- 2 2 6 2 2 6 50 34 6 156 107 11
44265-219 162 10 239 182 13 246 186 14 246 190 14
44266-246 190 14 246 190 14 246 190 14 246 190 14
44267-246 190 14 246 190 14 246 190 14 246 190 14
44268-246 190 14 246 190 14 246 190 14 242 186 14
44269-234 174 13 213 154 11 154 122 46 66 66 66
44270- 30 30 30 10 10 10 0 0 0 0 0 0
44271- 0 0 0 0 0 0 0 0 0 0 0 0
44272- 0 0 0 0 0 0 0 0 0 0 0 0
44273- 0 0 0 0 0 0 6 6 6 22 22 22
44274- 58 58 58 154 121 60 206 145 10 234 174 13
44275-242 186 14 246 186 14 246 190 14 246 190 14
44276-246 190 14 246 190 14 246 190 14 246 190 14
44277-246 190 14 246 190 14 246 190 14 246 190 14
44278-246 190 14 246 190 14 246 190 14 246 190 14
44279-246 186 14 236 178 12 210 162 10 163 110 8
44280- 61 42 6 138 138 138 218 218 218 250 250 250
44281-253 253 253 253 253 253 253 253 253 250 250 250
44282-242 242 242 210 210 210 144 144 144 66 66 66
44283- 6 6 6 2 2 6 2 2 6 2 2 6
44284- 2 2 6 2 2 6 61 42 6 163 110 8
44285-216 158 10 236 178 12 246 190 14 246 190 14
44286-246 190 14 246 190 14 246 190 14 246 190 14
44287-246 190 14 246 190 14 246 190 14 246 190 14
44288-246 190 14 239 182 13 230 174 11 216 158 10
44289-190 142 34 124 112 88 70 70 70 38 38 38
44290- 18 18 18 6 6 6 0 0 0 0 0 0
44291- 0 0 0 0 0 0 0 0 0 0 0 0
44292- 0 0 0 0 0 0 0 0 0 0 0 0
44293- 0 0 0 0 0 0 6 6 6 22 22 22
44294- 62 62 62 168 124 44 206 145 10 224 166 10
44295-236 178 12 239 182 13 242 186 14 242 186 14
44296-246 186 14 246 190 14 246 190 14 246 190 14
44297-246 190 14 246 190 14 246 190 14 246 190 14
44298-246 190 14 246 190 14 246 190 14 246 190 14
44299-246 190 14 236 178 12 216 158 10 175 118 6
44300- 80 54 7 2 2 6 6 6 6 30 30 30
44301- 54 54 54 62 62 62 50 50 50 38 38 38
44302- 14 14 14 2 2 6 2 2 6 2 2 6
44303- 2 2 6 2 2 6 2 2 6 2 2 6
44304- 2 2 6 6 6 6 80 54 7 167 114 7
44305-213 154 11 236 178 12 246 190 14 246 190 14
44306-246 190 14 246 190 14 246 190 14 246 190 14
44307-246 190 14 242 186 14 239 182 13 239 182 13
44308-230 174 11 210 150 10 174 135 50 124 112 88
44309- 82 82 82 54 54 54 34 34 34 18 18 18
44310- 6 6 6 0 0 0 0 0 0 0 0 0
44311- 0 0 0 0 0 0 0 0 0 0 0 0
44312- 0 0 0 0 0 0 0 0 0 0 0 0
44313- 0 0 0 0 0 0 6 6 6 18 18 18
44314- 50 50 50 158 118 36 192 133 9 200 144 11
44315-216 158 10 219 162 10 224 166 10 226 170 11
44316-230 174 11 236 178 12 239 182 13 239 182 13
44317-242 186 14 246 186 14 246 190 14 246 190 14
44318-246 190 14 246 190 14 246 190 14 246 190 14
44319-246 186 14 230 174 11 210 150 10 163 110 8
44320-104 69 6 10 10 10 2 2 6 2 2 6
44321- 2 2 6 2 2 6 2 2 6 2 2 6
44322- 2 2 6 2 2 6 2 2 6 2 2 6
44323- 2 2 6 2 2 6 2 2 6 2 2 6
44324- 2 2 6 6 6 6 91 60 6 167 114 7
44325-206 145 10 230 174 11 242 186 14 246 190 14
44326-246 190 14 246 190 14 246 186 14 242 186 14
44327-239 182 13 230 174 11 224 166 10 213 154 11
44328-180 133 36 124 112 88 86 86 86 58 58 58
44329- 38 38 38 22 22 22 10 10 10 6 6 6
44330- 0 0 0 0 0 0 0 0 0 0 0 0
44331- 0 0 0 0 0 0 0 0 0 0 0 0
44332- 0 0 0 0 0 0 0 0 0 0 0 0
44333- 0 0 0 0 0 0 0 0 0 14 14 14
44334- 34 34 34 70 70 70 138 110 50 158 118 36
44335-167 114 7 180 123 7 192 133 9 197 138 11
44336-200 144 11 206 145 10 213 154 11 219 162 10
44337-224 166 10 230 174 11 239 182 13 242 186 14
44338-246 186 14 246 186 14 246 186 14 246 186 14
44339-239 182 13 216 158 10 185 133 11 152 99 6
44340-104 69 6 18 14 6 2 2 6 2 2 6
44341- 2 2 6 2 2 6 2 2 6 2 2 6
44342- 2 2 6 2 2 6 2 2 6 2 2 6
44343- 2 2 6 2 2 6 2 2 6 2 2 6
44344- 2 2 6 6 6 6 80 54 7 152 99 6
44345-192 133 9 219 162 10 236 178 12 239 182 13
44346-246 186 14 242 186 14 239 182 13 236 178 12
44347-224 166 10 206 145 10 192 133 9 154 121 60
44348- 94 94 94 62 62 62 42 42 42 22 22 22
44349- 14 14 14 6 6 6 0 0 0 0 0 0
44350- 0 0 0 0 0 0 0 0 0 0 0 0
44351- 0 0 0 0 0 0 0 0 0 0 0 0
44352- 0 0 0 0 0 0 0 0 0 0 0 0
44353- 0 0 0 0 0 0 0 0 0 6 6 6
44354- 18 18 18 34 34 34 58 58 58 78 78 78
44355-101 98 89 124 112 88 142 110 46 156 107 11
44356-163 110 8 167 114 7 175 118 6 180 123 7
44357-185 133 11 197 138 11 210 150 10 219 162 10
44358-226 170 11 236 178 12 236 178 12 234 174 13
44359-219 162 10 197 138 11 163 110 8 130 83 6
44360- 91 60 6 10 10 10 2 2 6 2 2 6
44361- 18 18 18 38 38 38 38 38 38 38 38 38
44362- 38 38 38 38 38 38 38 38 38 38 38 38
44363- 38 38 38 38 38 38 26 26 26 2 2 6
44364- 2 2 6 6 6 6 70 47 6 137 92 6
44365-175 118 6 200 144 11 219 162 10 230 174 11
44366-234 174 13 230 174 11 219 162 10 210 150 10
44367-192 133 9 163 110 8 124 112 88 82 82 82
44368- 50 50 50 30 30 30 14 14 14 6 6 6
44369- 0 0 0 0 0 0 0 0 0 0 0 0
44370- 0 0 0 0 0 0 0 0 0 0 0 0
44371- 0 0 0 0 0 0 0 0 0 0 0 0
44372- 0 0 0 0 0 0 0 0 0 0 0 0
44373- 0 0 0 0 0 0 0 0 0 0 0 0
44374- 6 6 6 14 14 14 22 22 22 34 34 34
44375- 42 42 42 58 58 58 74 74 74 86 86 86
44376-101 98 89 122 102 70 130 98 46 121 87 25
44377-137 92 6 152 99 6 163 110 8 180 123 7
44378-185 133 11 197 138 11 206 145 10 200 144 11
44379-180 123 7 156 107 11 130 83 6 104 69 6
44380- 50 34 6 54 54 54 110 110 110 101 98 89
44381- 86 86 86 82 82 82 78 78 78 78 78 78
44382- 78 78 78 78 78 78 78 78 78 78 78 78
44383- 78 78 78 82 82 82 86 86 86 94 94 94
44384-106 106 106 101 101 101 86 66 34 124 80 6
44385-156 107 11 180 123 7 192 133 9 200 144 11
44386-206 145 10 200 144 11 192 133 9 175 118 6
44387-139 102 15 109 106 95 70 70 70 42 42 42
44388- 22 22 22 10 10 10 0 0 0 0 0 0
44389- 0 0 0 0 0 0 0 0 0 0 0 0
44390- 0 0 0 0 0 0 0 0 0 0 0 0
44391- 0 0 0 0 0 0 0 0 0 0 0 0
44392- 0 0 0 0 0 0 0 0 0 0 0 0
44393- 0 0 0 0 0 0 0 0 0 0 0 0
44394- 0 0 0 0 0 0 6 6 6 10 10 10
44395- 14 14 14 22 22 22 30 30 30 38 38 38
44396- 50 50 50 62 62 62 74 74 74 90 90 90
44397-101 98 89 112 100 78 121 87 25 124 80 6
44398-137 92 6 152 99 6 152 99 6 152 99 6
44399-138 86 6 124 80 6 98 70 6 86 66 30
44400-101 98 89 82 82 82 58 58 58 46 46 46
44401- 38 38 38 34 34 34 34 34 34 34 34 34
44402- 34 34 34 34 34 34 34 34 34 34 34 34
44403- 34 34 34 34 34 34 38 38 38 42 42 42
44404- 54 54 54 82 82 82 94 86 76 91 60 6
44405-134 86 6 156 107 11 167 114 7 175 118 6
44406-175 118 6 167 114 7 152 99 6 121 87 25
44407-101 98 89 62 62 62 34 34 34 18 18 18
44408- 6 6 6 0 0 0 0 0 0 0 0 0
44409- 0 0 0 0 0 0 0 0 0 0 0 0
44410- 0 0 0 0 0 0 0 0 0 0 0 0
44411- 0 0 0 0 0 0 0 0 0 0 0 0
44412- 0 0 0 0 0 0 0 0 0 0 0 0
44413- 0 0 0 0 0 0 0 0 0 0 0 0
44414- 0 0 0 0 0 0 0 0 0 0 0 0
44415- 0 0 0 6 6 6 6 6 6 10 10 10
44416- 18 18 18 22 22 22 30 30 30 42 42 42
44417- 50 50 50 66 66 66 86 86 86 101 98 89
44418-106 86 58 98 70 6 104 69 6 104 69 6
44419-104 69 6 91 60 6 82 62 34 90 90 90
44420- 62 62 62 38 38 38 22 22 22 14 14 14
44421- 10 10 10 10 10 10 10 10 10 10 10 10
44422- 10 10 10 10 10 10 6 6 6 10 10 10
44423- 10 10 10 10 10 10 10 10 10 14 14 14
44424- 22 22 22 42 42 42 70 70 70 89 81 66
44425- 80 54 7 104 69 6 124 80 6 137 92 6
44426-134 86 6 116 81 8 100 82 52 86 86 86
44427- 58 58 58 30 30 30 14 14 14 6 6 6
44428- 0 0 0 0 0 0 0 0 0 0 0 0
44429- 0 0 0 0 0 0 0 0 0 0 0 0
44430- 0 0 0 0 0 0 0 0 0 0 0 0
44431- 0 0 0 0 0 0 0 0 0 0 0 0
44432- 0 0 0 0 0 0 0 0 0 0 0 0
44433- 0 0 0 0 0 0 0 0 0 0 0 0
44434- 0 0 0 0 0 0 0 0 0 0 0 0
44435- 0 0 0 0 0 0 0 0 0 0 0 0
44436- 0 0 0 6 6 6 10 10 10 14 14 14
44437- 18 18 18 26 26 26 38 38 38 54 54 54
44438- 70 70 70 86 86 86 94 86 76 89 81 66
44439- 89 81 66 86 86 86 74 74 74 50 50 50
44440- 30 30 30 14 14 14 6 6 6 0 0 0
44441- 0 0 0 0 0 0 0 0 0 0 0 0
44442- 0 0 0 0 0 0 0 0 0 0 0 0
44443- 0 0 0 0 0 0 0 0 0 0 0 0
44444- 6 6 6 18 18 18 34 34 34 58 58 58
44445- 82 82 82 89 81 66 89 81 66 89 81 66
44446- 94 86 66 94 86 76 74 74 74 50 50 50
44447- 26 26 26 14 14 14 6 6 6 0 0 0
44448- 0 0 0 0 0 0 0 0 0 0 0 0
44449- 0 0 0 0 0 0 0 0 0 0 0 0
44450- 0 0 0 0 0 0 0 0 0 0 0 0
44451- 0 0 0 0 0 0 0 0 0 0 0 0
44452- 0 0 0 0 0 0 0 0 0 0 0 0
44453- 0 0 0 0 0 0 0 0 0 0 0 0
44454- 0 0 0 0 0 0 0 0 0 0 0 0
44455- 0 0 0 0 0 0 0 0 0 0 0 0
44456- 0 0 0 0 0 0 0 0 0 0 0 0
44457- 6 6 6 6 6 6 14 14 14 18 18 18
44458- 30 30 30 38 38 38 46 46 46 54 54 54
44459- 50 50 50 42 42 42 30 30 30 18 18 18
44460- 10 10 10 0 0 0 0 0 0 0 0 0
44461- 0 0 0 0 0 0 0 0 0 0 0 0
44462- 0 0 0 0 0 0 0 0 0 0 0 0
44463- 0 0 0 0 0 0 0 0 0 0 0 0
44464- 0 0 0 6 6 6 14 14 14 26 26 26
44465- 38 38 38 50 50 50 58 58 58 58 58 58
44466- 54 54 54 42 42 42 30 30 30 18 18 18
44467- 10 10 10 0 0 0 0 0 0 0 0 0
44468- 0 0 0 0 0 0 0 0 0 0 0 0
44469- 0 0 0 0 0 0 0 0 0 0 0 0
44470- 0 0 0 0 0 0 0 0 0 0 0 0
44471- 0 0 0 0 0 0 0 0 0 0 0 0
44472- 0 0 0 0 0 0 0 0 0 0 0 0
44473- 0 0 0 0 0 0 0 0 0 0 0 0
44474- 0 0 0 0 0 0 0 0 0 0 0 0
44475- 0 0 0 0 0 0 0 0 0 0 0 0
44476- 0 0 0 0 0 0 0 0 0 0 0 0
44477- 0 0 0 0 0 0 0 0 0 6 6 6
44478- 6 6 6 10 10 10 14 14 14 18 18 18
44479- 18 18 18 14 14 14 10 10 10 6 6 6
44480- 0 0 0 0 0 0 0 0 0 0 0 0
44481- 0 0 0 0 0 0 0 0 0 0 0 0
44482- 0 0 0 0 0 0 0 0 0 0 0 0
44483- 0 0 0 0 0 0 0 0 0 0 0 0
44484- 0 0 0 0 0 0 0 0 0 6 6 6
44485- 14 14 14 18 18 18 22 22 22 22 22 22
44486- 18 18 18 14 14 14 10 10 10 6 6 6
44487- 0 0 0 0 0 0 0 0 0 0 0 0
44488- 0 0 0 0 0 0 0 0 0 0 0 0
44489- 0 0 0 0 0 0 0 0 0 0 0 0
44490- 0 0 0 0 0 0 0 0 0 0 0 0
44491- 0 0 0 0 0 0 0 0 0 0 0 0
44492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44505+4 4 4 4 4 4
44506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44519+4 4 4 4 4 4
44520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44533+4 4 4 4 4 4
44534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44547+4 4 4 4 4 4
44548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44561+4 4 4 4 4 4
44562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44575+4 4 4 4 4 4
44576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44580+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44581+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44585+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44586+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44587+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44589+4 4 4 4 4 4
44590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44594+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44595+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44596+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44599+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44600+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44601+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44602+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44603+4 4 4 4 4 4
44604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44608+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
44609+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
44610+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44613+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
44614+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
44615+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
44616+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
44617+4 4 4 4 4 4
44618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44621+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
44622+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
44623+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
44624+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
44625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44626+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44627+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
44628+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
44629+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
44630+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
44631+4 4 4 4 4 4
44632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44635+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
44636+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
44637+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
44638+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
44639+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44640+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
44641+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
44642+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
44643+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
44644+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
44645+4 4 4 4 4 4
44646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44649+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
44650+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
44651+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
44652+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
44653+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44654+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
44655+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
44656+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
44657+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
44658+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
44659+4 4 4 4 4 4
44660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44662+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
44663+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
44664+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
44665+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
44666+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
44667+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
44668+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
44669+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
44670+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
44671+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
44672+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
44673+4 4 4 4 4 4
44674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44676+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
44677+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
44678+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
44679+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
44680+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
44681+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
44682+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
44683+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
44684+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
44685+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
44686+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
44687+4 4 4 4 4 4
44688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44690+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
44691+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
44692+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
44693+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
44694+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
44695+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
44696+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
44697+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
44698+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
44699+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
44700+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44701+4 4 4 4 4 4
44702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44704+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
44705+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
44706+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
44707+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
44708+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
44709+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
44710+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
44711+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
44712+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
44713+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
44714+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
44715+4 4 4 4 4 4
44716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44717+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
44718+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
44719+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
44720+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
44721+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
44722+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
44723+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
44724+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
44725+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
44726+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
44727+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
44728+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
44729+4 4 4 4 4 4
44730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44731+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
44732+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
44733+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
44734+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44735+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
44736+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
44737+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
44738+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
44739+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
44740+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
44741+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
44742+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
44743+0 0 0 4 4 4
44744+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44745+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
44746+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
44747+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
44748+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
44749+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
44750+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
44751+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
44752+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
44753+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
44754+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
44755+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
44756+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
44757+2 0 0 0 0 0
44758+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
44759+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
44760+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
44761+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
44762+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
44763+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
44764+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
44765+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
44766+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
44767+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
44768+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
44769+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
44770+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
44771+37 38 37 0 0 0
44772+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44773+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
44774+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
44775+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
44776+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
44777+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
44778+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
44779+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
44780+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
44781+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
44782+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
44783+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
44784+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
44785+85 115 134 4 0 0
44786+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
44787+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
44788+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
44789+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
44790+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
44791+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
44792+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
44793+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
44794+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
44795+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
44796+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
44797+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
44798+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
44799+60 73 81 4 0 0
44800+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
44801+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
44802+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
44803+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
44804+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
44805+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
44806+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
44807+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
44808+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
44809+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
44810+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
44811+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
44812+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
44813+16 19 21 4 0 0
44814+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
44815+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
44816+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
44817+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
44818+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
44819+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
44820+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
44821+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
44822+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
44823+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
44824+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
44825+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
44826+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
44827+4 0 0 4 3 3
44828+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
44829+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
44830+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
44831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
44832+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
44833+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
44834+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
44835+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
44836+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
44837+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
44838+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
44839+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
44840+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
44841+3 2 2 4 4 4
44842+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
44843+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
44844+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
44845+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44846+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
44847+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
44848+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
44849+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
44850+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
44851+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
44852+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
44853+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
44854+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
44855+4 4 4 4 4 4
44856+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
44857+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
44858+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
44859+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
44860+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
44861+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
44862+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
44863+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
44864+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
44865+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
44866+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
44867+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
44868+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
44869+4 4 4 4 4 4
44870+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
44871+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
44872+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
44873+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
44874+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
44875+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44876+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
44877+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
44878+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
44879+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
44880+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
44881+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
44882+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
44883+5 5 5 5 5 5
44884+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
44885+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
44886+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
44887+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
44888+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
44889+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44890+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
44891+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
44892+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
44893+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
44894+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
44895+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
44896+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
44897+5 5 5 4 4 4
44898+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
44899+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
44900+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
44901+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
44902+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44903+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
44904+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
44905+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
44906+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
44907+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
44908+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
44909+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
44910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44911+4 4 4 4 4 4
44912+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
44913+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
44914+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
44915+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
44916+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
44917+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44918+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44919+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
44920+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
44921+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
44922+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
44923+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
44924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44925+4 4 4 4 4 4
44926+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
44927+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
44928+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
44929+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
44930+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44931+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
44932+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
44933+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
44934+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
44935+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
44936+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
44937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44939+4 4 4 4 4 4
44940+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
44941+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
44942+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
44943+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
44944+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44945+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44946+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44947+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
44948+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
44949+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
44950+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
44951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44953+4 4 4 4 4 4
44954+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
44955+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
44956+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
44957+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
44958+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44959+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
44960+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44961+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
44962+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
44963+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
44964+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44967+4 4 4 4 4 4
44968+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
44969+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
44970+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
44971+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
44972+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44973+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
44974+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
44975+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
44976+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
44977+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
44978+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
44979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44981+4 4 4 4 4 4
44982+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
44983+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
44984+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
44985+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
44986+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44987+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
44988+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
44989+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
44990+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
44991+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
44992+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
44993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44995+4 4 4 4 4 4
44996+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
44997+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
44998+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
44999+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45000+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45001+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45002+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45003+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45004+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45005+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45006+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009+4 4 4 4 4 4
45010+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45011+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45012+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45013+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45014+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45015+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45016+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45017+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45018+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45019+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45020+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45023+4 4 4 4 4 4
45024+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45025+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45026+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45027+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45028+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45029+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45030+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45031+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45032+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45033+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45034+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037+4 4 4 4 4 4
45038+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45039+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45040+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45041+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45042+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45043+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45044+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45045+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45046+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45047+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45048+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051+4 4 4 4 4 4
45052+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45053+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45054+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45055+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45056+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45057+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45058+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45059+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45060+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45061+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45062+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065+4 4 4 4 4 4
45066+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45067+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45068+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45069+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45070+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45071+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45072+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45073+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45074+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45075+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45076+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079+4 4 4 4 4 4
45080+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45081+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45082+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45083+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45084+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45085+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45086+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45087+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45088+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45089+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45090+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45093+4 4 4 4 4 4
45094+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45095+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45096+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45097+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45098+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45099+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45100+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45101+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45102+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45103+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45104+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45107+4 4 4 4 4 4
45108+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45109+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45110+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45111+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45112+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45113+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45114+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45115+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45116+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45117+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45118+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45121+4 4 4 4 4 4
45122+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45123+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45124+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45125+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45126+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45127+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45128+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45129+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45130+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45131+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45132+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45135+4 4 4 4 4 4
45136+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45137+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45138+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45139+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45140+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45141+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45142+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45143+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45144+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45145+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45146+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45149+4 4 4 4 4 4
45150+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45151+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45152+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45153+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45154+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45155+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45156+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45157+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45158+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45159+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45160+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45163+4 4 4 4 4 4
45164+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45165+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45166+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45167+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45168+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45169+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45170+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45171+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45172+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45173+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45174+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45177+4 4 4 4 4 4
45178+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45179+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45180+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45181+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45182+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45183+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45184+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45185+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45186+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45187+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45188+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45191+4 4 4 4 4 4
45192+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45193+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45194+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45195+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45196+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45197+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45198+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45199+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45200+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45201+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45202+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45205+4 4 4 4 4 4
45206+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45207+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45208+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45209+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45210+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45211+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45212+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45213+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45214+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45215+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45216+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45219+4 4 4 4 4 4
45220+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45221+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45222+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45223+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45224+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45225+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45226+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45227+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45228+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45229+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45230+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45233+4 4 4 4 4 4
45234+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45235+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45236+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45237+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45238+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45239+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45240+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45241+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45242+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45243+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45244+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45247+4 4 4 4 4 4
45248+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45249+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45250+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45251+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45252+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45253+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45254+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45255+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45256+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45257+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45258+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45261+4 4 4 4 4 4
45262+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45263+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45264+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45265+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45266+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45267+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45268+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45269+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45270+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45271+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45272+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45275+4 4 4 4 4 4
45276+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45277+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45278+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45279+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45280+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45281+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45282+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45283+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45284+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45285+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45286+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45289+4 4 4 4 4 4
45290+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45291+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45292+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45293+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45294+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45295+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45296+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45297+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45298+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45299+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45300+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45303+4 4 4 4 4 4
45304+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45305+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45306+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45307+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45308+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45309+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45310+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45311+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45312+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45313+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45317+4 4 4 4 4 4
45318+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45319+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45320+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45321+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45322+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45323+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45324+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45325+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45326+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45327+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45331+4 4 4 4 4 4
45332+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45333+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45334+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45335+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45336+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45337+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45338+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45339+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45340+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45341+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45345+4 4 4 4 4 4
45346+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45347+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45348+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45349+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45350+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45351+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45352+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45353+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45354+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45355+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45359+4 4 4 4 4 4
45360+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45361+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45362+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45363+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45364+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45365+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45366+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45367+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45368+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45373+4 4 4 4 4 4
45374+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45375+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45376+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45377+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45378+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45379+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45380+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45381+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45382+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45387+4 4 4 4 4 4
45388+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45389+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45390+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45391+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45392+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45393+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45394+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45395+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45396+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45401+4 4 4 4 4 4
45402+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45403+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45404+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45405+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45406+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45407+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45408+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45409+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45415+4 4 4 4 4 4
45416+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45417+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45418+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45419+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45420+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45421+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45422+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45423+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45429+4 4 4 4 4 4
45430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45431+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45432+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45433+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45434+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45435+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45436+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45437+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45443+4 4 4 4 4 4
45444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45445+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45446+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45447+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45448+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45449+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45450+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45451+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45457+4 4 4 4 4 4
45458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45459+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45460+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45461+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45462+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45463+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45464+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45465+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45471+4 4 4 4 4 4
45472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45474+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45475+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45476+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45477+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45478+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45479+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45485+4 4 4 4 4 4
45486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45489+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45490+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45491+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45492+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45499+4 4 4 4 4 4
45500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45503+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45504+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45505+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45506+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45513+4 4 4 4 4 4
45514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45517+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45518+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45519+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45520+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45527+4 4 4 4 4 4
45528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45531+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45532+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45533+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45534+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45541+4 4 4 4 4 4
45542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45546+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45547+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45548+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45555+4 4 4 4 4 4
45556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45560+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45561+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45562+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45569+4 4 4 4 4 4
45570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45574+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45575+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45576+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45583+4 4 4 4 4 4
45584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45588+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45589+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45597+4 4 4 4 4 4
45598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45602+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45603+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45611+4 4 4 4 4 4
45612diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
45613index 443e3c8..c443d6a 100644
45614--- a/drivers/video/nvidia/nv_backlight.c
45615+++ b/drivers/video/nvidia/nv_backlight.c
45616@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
45617 return bd->props.brightness;
45618 }
45619
45620-static struct backlight_ops nvidia_bl_ops = {
45621+static const struct backlight_ops nvidia_bl_ops = {
45622 .get_brightness = nvidia_bl_get_brightness,
45623 .update_status = nvidia_bl_update_status,
45624 };
45625diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
45626index d94c57f..912984c 100644
45627--- a/drivers/video/riva/fbdev.c
45628+++ b/drivers/video/riva/fbdev.c
45629@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
45630 return bd->props.brightness;
45631 }
45632
45633-static struct backlight_ops riva_bl_ops = {
45634+static const struct backlight_ops riva_bl_ops = {
45635 .get_brightness = riva_bl_get_brightness,
45636 .update_status = riva_bl_update_status,
45637 };
45638diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
45639index 54fbb29..2c108fc 100644
45640--- a/drivers/video/uvesafb.c
45641+++ b/drivers/video/uvesafb.c
45642@@ -18,6 +18,7 @@
45643 #include <linux/fb.h>
45644 #include <linux/io.h>
45645 #include <linux/mutex.h>
45646+#include <linux/moduleloader.h>
45647 #include <video/edid.h>
45648 #include <video/uvesafb.h>
45649 #ifdef CONFIG_X86
45650@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
45651 NULL,
45652 };
45653
45654- return call_usermodehelper(v86d_path, argv, envp, 1);
45655+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
45656 }
45657
45658 /*
45659@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
45660 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
45661 par->pmi_setpal = par->ypan = 0;
45662 } else {
45663+
45664+#ifdef CONFIG_PAX_KERNEXEC
45665+#ifdef CONFIG_MODULES
45666+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
45667+#endif
45668+ if (!par->pmi_code) {
45669+ par->pmi_setpal = par->ypan = 0;
45670+ return 0;
45671+ }
45672+#endif
45673+
45674 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
45675 + task->t.regs.edi);
45676+
45677+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45678+ pax_open_kernel();
45679+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
45680+ pax_close_kernel();
45681+
45682+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
45683+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
45684+#else
45685 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
45686 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
45687+#endif
45688+
45689 printk(KERN_INFO "uvesafb: protected mode interface info at "
45690 "%04x:%04x\n",
45691 (u16)task->t.regs.es, (u16)task->t.regs.edi);
45692@@ -1799,6 +1822,11 @@ out:
45693 if (par->vbe_modes)
45694 kfree(par->vbe_modes);
45695
45696+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45697+ if (par->pmi_code)
45698+ module_free_exec(NULL, par->pmi_code);
45699+#endif
45700+
45701 framebuffer_release(info);
45702 return err;
45703 }
45704@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
45705 kfree(par->vbe_state_orig);
45706 if (par->vbe_state_saved)
45707 kfree(par->vbe_state_saved);
45708+
45709+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45710+ if (par->pmi_code)
45711+ module_free_exec(NULL, par->pmi_code);
45712+#endif
45713+
45714 }
45715
45716 framebuffer_release(info);
45717diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
45718index bd37ee1..cb827e8 100644
45719--- a/drivers/video/vesafb.c
45720+++ b/drivers/video/vesafb.c
45721@@ -9,6 +9,7 @@
45722 */
45723
45724 #include <linux/module.h>
45725+#include <linux/moduleloader.h>
45726 #include <linux/kernel.h>
45727 #include <linux/errno.h>
45728 #include <linux/string.h>
45729@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45730 static int vram_total __initdata; /* Set total amount of memory */
45731 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45732 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45733-static void (*pmi_start)(void) __read_mostly;
45734-static void (*pmi_pal) (void) __read_mostly;
45735+static void (*pmi_start)(void) __read_only;
45736+static void (*pmi_pal) (void) __read_only;
45737 static int depth __read_mostly;
45738 static int vga_compat __read_mostly;
45739 /* --------------------------------------------------------------------- */
45740@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45741 unsigned int size_vmode;
45742 unsigned int size_remap;
45743 unsigned int size_total;
45744+ void *pmi_code = NULL;
45745
45746 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45747 return -ENODEV;
45748@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45749 size_remap = size_total;
45750 vesafb_fix.smem_len = size_remap;
45751
45752-#ifndef __i386__
45753- screen_info.vesapm_seg = 0;
45754-#endif
45755-
45756 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
45757 printk(KERN_WARNING
45758 "vesafb: cannot reserve video memory at 0x%lx\n",
45759@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
45760 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
45761 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
45762
45763+#ifdef __i386__
45764+
45765+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45766+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
45767+ if (!pmi_code)
45768+#elif !defined(CONFIG_PAX_KERNEXEC)
45769+ if (0)
45770+#endif
45771+
45772+#endif
45773+ screen_info.vesapm_seg = 0;
45774+
45775 if (screen_info.vesapm_seg) {
45776- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
45777- screen_info.vesapm_seg,screen_info.vesapm_off);
45778+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
45779+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
45780 }
45781
45782 if (screen_info.vesapm_seg < 0xc000)
45783@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
45784
45785 if (ypan || pmi_setpal) {
45786 unsigned short *pmi_base;
45787+
45788 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
45789- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
45790- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
45791+
45792+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45793+ pax_open_kernel();
45794+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
45795+#else
45796+ pmi_code = pmi_base;
45797+#endif
45798+
45799+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
45800+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
45801+
45802+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45803+ pmi_start = ktva_ktla(pmi_start);
45804+ pmi_pal = ktva_ktla(pmi_pal);
45805+ pax_close_kernel();
45806+#endif
45807+
45808 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
45809 if (pmi_base[3]) {
45810 printk(KERN_INFO "vesafb: pmi: ports = ");
45811@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
45812 info->node, info->fix.id);
45813 return 0;
45814 err:
45815+
45816+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45817+ module_free_exec(NULL, pmi_code);
45818+#endif
45819+
45820 if (info->screen_base)
45821 iounmap(info->screen_base);
45822 framebuffer_release(info);
45823diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
45824index 88a60e0..6783cc2 100644
45825--- a/drivers/xen/sys-hypervisor.c
45826+++ b/drivers/xen/sys-hypervisor.c
45827@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
45828 return 0;
45829 }
45830
45831-static struct sysfs_ops hyp_sysfs_ops = {
45832+static const struct sysfs_ops hyp_sysfs_ops = {
45833 .show = hyp_sysfs_show,
45834 .store = hyp_sysfs_store,
45835 };
45836diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
45837index 18f74ec..3227009 100644
45838--- a/fs/9p/vfs_inode.c
45839+++ b/fs/9p/vfs_inode.c
45840@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
45841 static void
45842 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45843 {
45844- char *s = nd_get_link(nd);
45845+ const char *s = nd_get_link(nd);
45846
45847 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
45848 IS_ERR(s) ? "<error>" : s);
45849diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
45850index bb4cc5b..df5eaa0 100644
45851--- a/fs/Kconfig.binfmt
45852+++ b/fs/Kconfig.binfmt
45853@@ -86,7 +86,7 @@ config HAVE_AOUT
45854
45855 config BINFMT_AOUT
45856 tristate "Kernel support for a.out and ECOFF binaries"
45857- depends on HAVE_AOUT
45858+ depends on HAVE_AOUT && BROKEN
45859 ---help---
45860 A.out (Assembler.OUTput) is a set of formats for libraries and
45861 executables used in the earliest versions of UNIX. Linux used
45862diff --git a/fs/aio.c b/fs/aio.c
45863index 22a19ad..d484e5b 100644
45864--- a/fs/aio.c
45865+++ b/fs/aio.c
45866@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
45867 size += sizeof(struct io_event) * nr_events;
45868 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
45869
45870- if (nr_pages < 0)
45871+ if (nr_pages <= 0)
45872 return -EINVAL;
45873
45874 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
45875@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
45876 struct aio_timeout to;
45877 int retry = 0;
45878
45879+ pax_track_stack();
45880+
45881 /* needed to zero any padding within an entry (there shouldn't be
45882 * any, but C is fun!
45883 */
45884@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
45885 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
45886 {
45887 ssize_t ret;
45888+ struct iovec iovstack;
45889
45890 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
45891 kiocb->ki_nbytes, 1,
45892- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
45893+ &iovstack, &kiocb->ki_iovec);
45894 if (ret < 0)
45895 goto out;
45896
45897+ if (kiocb->ki_iovec == &iovstack) {
45898+ kiocb->ki_inline_vec = iovstack;
45899+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
45900+ }
45901 kiocb->ki_nr_segs = kiocb->ki_nbytes;
45902 kiocb->ki_cur_seg = 0;
45903 /* ki_nbytes/left now reflect bytes instead of segs */
45904diff --git a/fs/attr.c b/fs/attr.c
45905index 96d394b..33cf5b4 100644
45906--- a/fs/attr.c
45907+++ b/fs/attr.c
45908@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
45909 unsigned long limit;
45910
45911 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
45912+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
45913 if (limit != RLIM_INFINITY && offset > limit)
45914 goto out_sig;
45915 if (offset > inode->i_sb->s_maxbytes)
45916diff --git a/fs/autofs/root.c b/fs/autofs/root.c
45917index 4a1401c..05eb5ca 100644
45918--- a/fs/autofs/root.c
45919+++ b/fs/autofs/root.c
45920@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
45921 set_bit(n,sbi->symlink_bitmap);
45922 sl = &sbi->symlink[n];
45923 sl->len = strlen(symname);
45924- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
45925+ slsize = sl->len+1;
45926+ sl->data = kmalloc(slsize, GFP_KERNEL);
45927 if (!sl->data) {
45928 clear_bit(n,sbi->symlink_bitmap);
45929 unlock_kernel();
45930diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
45931index b4ea829..e63ef18 100644
45932--- a/fs/autofs4/symlink.c
45933+++ b/fs/autofs4/symlink.c
45934@@ -15,7 +15,7 @@
45935 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
45936 {
45937 struct autofs_info *ino = autofs4_dentry_ino(dentry);
45938- nd_set_link(nd, (char *)ino->u.symlink);
45939+ nd_set_link(nd, ino->u.symlink);
45940 return NULL;
45941 }
45942
45943diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
45944index 2341375..df9d1c2 100644
45945--- a/fs/autofs4/waitq.c
45946+++ b/fs/autofs4/waitq.c
45947@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
45948 {
45949 unsigned long sigpipe, flags;
45950 mm_segment_t fs;
45951- const char *data = (const char *)addr;
45952+ const char __user *data = (const char __force_user *)addr;
45953 ssize_t wr = 0;
45954
45955 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
45956diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
45957index 9158c07..3f06659 100644
45958--- a/fs/befs/linuxvfs.c
45959+++ b/fs/befs/linuxvfs.c
45960@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45961 {
45962 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
45963 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
45964- char *link = nd_get_link(nd);
45965+ const char *link = nd_get_link(nd);
45966 if (!IS_ERR(link))
45967 kfree(link);
45968 }
45969diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
45970index 0133b5a..b3baa9f 100644
45971--- a/fs/binfmt_aout.c
45972+++ b/fs/binfmt_aout.c
45973@@ -16,6 +16,7 @@
45974 #include <linux/string.h>
45975 #include <linux/fs.h>
45976 #include <linux/file.h>
45977+#include <linux/security.h>
45978 #include <linux/stat.h>
45979 #include <linux/fcntl.h>
45980 #include <linux/ptrace.h>
45981@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
45982 #endif
45983 # define START_STACK(u) (u.start_stack)
45984
45985+ memset(&dump, 0, sizeof(dump));
45986+
45987 fs = get_fs();
45988 set_fs(KERNEL_DS);
45989 has_dumped = 1;
45990@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
45991
45992 /* If the size of the dump file exceeds the rlimit, then see what would happen
45993 if we wrote the stack, but not the data area. */
45994+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
45995 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
45996 dump.u_dsize = 0;
45997
45998 /* Make sure we have enough room to write the stack and data areas. */
45999+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46000 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46001 dump.u_ssize = 0;
46002
46003@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46004 dump_size = dump.u_ssize << PAGE_SHIFT;
46005 DUMP_WRITE(dump_start,dump_size);
46006 }
46007-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46008- set_fs(KERNEL_DS);
46009- DUMP_WRITE(current,sizeof(*current));
46010+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46011 end_coredump:
46012 set_fs(fs);
46013 return has_dumped;
46014@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46015 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46016 if (rlim >= RLIM_INFINITY)
46017 rlim = ~0;
46018+
46019+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46020 if (ex.a_data + ex.a_bss > rlim)
46021 return -ENOMEM;
46022
46023@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46024 install_exec_creds(bprm);
46025 current->flags &= ~PF_FORKNOEXEC;
46026
46027+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46028+ current->mm->pax_flags = 0UL;
46029+#endif
46030+
46031+#ifdef CONFIG_PAX_PAGEEXEC
46032+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46033+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46034+
46035+#ifdef CONFIG_PAX_EMUTRAMP
46036+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46037+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46038+#endif
46039+
46040+#ifdef CONFIG_PAX_MPROTECT
46041+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46042+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46043+#endif
46044+
46045+ }
46046+#endif
46047+
46048 if (N_MAGIC(ex) == OMAGIC) {
46049 unsigned long text_addr, map_size;
46050 loff_t pos;
46051@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46052
46053 down_write(&current->mm->mmap_sem);
46054 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46055- PROT_READ | PROT_WRITE | PROT_EXEC,
46056+ PROT_READ | PROT_WRITE,
46057 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46058 fd_offset + ex.a_text);
46059 up_write(&current->mm->mmap_sem);
46060diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46061index 1ed37ba..32cc555 100644
46062--- a/fs/binfmt_elf.c
46063+++ b/fs/binfmt_elf.c
46064@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46065 #define elf_core_dump NULL
46066 #endif
46067
46068+#ifdef CONFIG_PAX_MPROTECT
46069+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46070+#endif
46071+
46072 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46073 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46074 #else
46075@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format = {
46076 .load_binary = load_elf_binary,
46077 .load_shlib = load_elf_library,
46078 .core_dump = elf_core_dump,
46079+
46080+#ifdef CONFIG_PAX_MPROTECT
46081+ .handle_mprotect= elf_handle_mprotect,
46082+#endif
46083+
46084 .min_coredump = ELF_EXEC_PAGESIZE,
46085 .hasvdso = 1
46086 };
46087@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
46088
46089 static int set_brk(unsigned long start, unsigned long end)
46090 {
46091+ unsigned long e = end;
46092+
46093 start = ELF_PAGEALIGN(start);
46094 end = ELF_PAGEALIGN(end);
46095 if (end > start) {
46096@@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
46097 if (BAD_ADDR(addr))
46098 return addr;
46099 }
46100- current->mm->start_brk = current->mm->brk = end;
46101+ current->mm->start_brk = current->mm->brk = e;
46102 return 0;
46103 }
46104
46105@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46106 elf_addr_t __user *u_rand_bytes;
46107 const char *k_platform = ELF_PLATFORM;
46108 const char *k_base_platform = ELF_BASE_PLATFORM;
46109- unsigned char k_rand_bytes[16];
46110+ u32 k_rand_bytes[4];
46111 int items;
46112 elf_addr_t *elf_info;
46113 int ei_index = 0;
46114 const struct cred *cred = current_cred();
46115 struct vm_area_struct *vma;
46116+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46117+
46118+ pax_track_stack();
46119
46120 /*
46121 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46122@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46123 * Generate 16 random bytes for userspace PRNG seeding.
46124 */
46125 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46126- u_rand_bytes = (elf_addr_t __user *)
46127- STACK_ALLOC(p, sizeof(k_rand_bytes));
46128+ srandom32(k_rand_bytes[0] ^ random32());
46129+ srandom32(k_rand_bytes[1] ^ random32());
46130+ srandom32(k_rand_bytes[2] ^ random32());
46131+ srandom32(k_rand_bytes[3] ^ random32());
46132+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46133+ u_rand_bytes = (elf_addr_t __user *) p;
46134 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46135 return -EFAULT;
46136
46137@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46138 return -EFAULT;
46139 current->mm->env_end = p;
46140
46141+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46142+
46143 /* Put the elf_info on the stack in the right place. */
46144 sp = (elf_addr_t __user *)envp + 1;
46145- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46146+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46147 return -EFAULT;
46148 return 0;
46149 }
46150@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46151 {
46152 struct elf_phdr *elf_phdata;
46153 struct elf_phdr *eppnt;
46154- unsigned long load_addr = 0;
46155+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46156 int load_addr_set = 0;
46157 unsigned long last_bss = 0, elf_bss = 0;
46158- unsigned long error = ~0UL;
46159+ unsigned long error = -EINVAL;
46160 unsigned long total_size;
46161 int retval, i, size;
46162
46163@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46164 goto out_close;
46165 }
46166
46167+#ifdef CONFIG_PAX_SEGMEXEC
46168+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46169+ pax_task_size = SEGMEXEC_TASK_SIZE;
46170+#endif
46171+
46172 eppnt = elf_phdata;
46173 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46174 if (eppnt->p_type == PT_LOAD) {
46175@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46176 k = load_addr + eppnt->p_vaddr;
46177 if (BAD_ADDR(k) ||
46178 eppnt->p_filesz > eppnt->p_memsz ||
46179- eppnt->p_memsz > TASK_SIZE ||
46180- TASK_SIZE - eppnt->p_memsz < k) {
46181+ eppnt->p_memsz > pax_task_size ||
46182+ pax_task_size - eppnt->p_memsz < k) {
46183 error = -ENOMEM;
46184 goto out_close;
46185 }
46186@@ -532,6 +557,194 @@ out:
46187 return error;
46188 }
46189
46190+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
46191+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
46192+{
46193+ unsigned long pax_flags = 0UL;
46194+
46195+#ifdef CONFIG_PAX_PAGEEXEC
46196+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46197+ pax_flags |= MF_PAX_PAGEEXEC;
46198+#endif
46199+
46200+#ifdef CONFIG_PAX_SEGMEXEC
46201+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46202+ pax_flags |= MF_PAX_SEGMEXEC;
46203+#endif
46204+
46205+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46206+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46207+ if (nx_enabled)
46208+ pax_flags &= ~MF_PAX_SEGMEXEC;
46209+ else
46210+ pax_flags &= ~MF_PAX_PAGEEXEC;
46211+ }
46212+#endif
46213+
46214+#ifdef CONFIG_PAX_EMUTRAMP
46215+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46216+ pax_flags |= MF_PAX_EMUTRAMP;
46217+#endif
46218+
46219+#ifdef CONFIG_PAX_MPROTECT
46220+ if (elf_phdata->p_flags & PF_MPROTECT)
46221+ pax_flags |= MF_PAX_MPROTECT;
46222+#endif
46223+
46224+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46225+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46226+ pax_flags |= MF_PAX_RANDMMAP;
46227+#endif
46228+
46229+ return pax_flags;
46230+}
46231+#endif
46232+
46233+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46234+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
46235+{
46236+ unsigned long pax_flags = 0UL;
46237+
46238+#ifdef CONFIG_PAX_PAGEEXEC
46239+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46240+ pax_flags |= MF_PAX_PAGEEXEC;
46241+#endif
46242+
46243+#ifdef CONFIG_PAX_SEGMEXEC
46244+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46245+ pax_flags |= MF_PAX_SEGMEXEC;
46246+#endif
46247+
46248+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46249+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46250+ if (nx_enabled)
46251+ pax_flags &= ~MF_PAX_SEGMEXEC;
46252+ else
46253+ pax_flags &= ~MF_PAX_PAGEEXEC;
46254+ }
46255+#endif
46256+
46257+#ifdef CONFIG_PAX_EMUTRAMP
46258+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46259+ pax_flags |= MF_PAX_EMUTRAMP;
46260+#endif
46261+
46262+#ifdef CONFIG_PAX_MPROTECT
46263+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46264+ pax_flags |= MF_PAX_MPROTECT;
46265+#endif
46266+
46267+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46268+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46269+ pax_flags |= MF_PAX_RANDMMAP;
46270+#endif
46271+
46272+ return pax_flags;
46273+}
46274+#endif
46275+
46276+#ifdef CONFIG_PAX_EI_PAX
46277+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46278+{
46279+ unsigned long pax_flags = 0UL;
46280+
46281+#ifdef CONFIG_PAX_PAGEEXEC
46282+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46283+ pax_flags |= MF_PAX_PAGEEXEC;
46284+#endif
46285+
46286+#ifdef CONFIG_PAX_SEGMEXEC
46287+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46288+ pax_flags |= MF_PAX_SEGMEXEC;
46289+#endif
46290+
46291+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46292+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46293+ if (nx_enabled)
46294+ pax_flags &= ~MF_PAX_SEGMEXEC;
46295+ else
46296+ pax_flags &= ~MF_PAX_PAGEEXEC;
46297+ }
46298+#endif
46299+
46300+#ifdef CONFIG_PAX_EMUTRAMP
46301+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46302+ pax_flags |= MF_PAX_EMUTRAMP;
46303+#endif
46304+
46305+#ifdef CONFIG_PAX_MPROTECT
46306+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46307+ pax_flags |= MF_PAX_MPROTECT;
46308+#endif
46309+
46310+#ifdef CONFIG_PAX_ASLR
46311+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46312+ pax_flags |= MF_PAX_RANDMMAP;
46313+#endif
46314+
46315+ return pax_flags;
46316+}
46317+#endif
46318+
46319+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
46320+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46321+{
46322+ unsigned long pax_flags = 0UL;
46323+
46324+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46325+ unsigned long i;
46326+ int found_flags = 0;
46327+#endif
46328+
46329+#ifdef CONFIG_PAX_EI_PAX
46330+ pax_flags = pax_parse_ei_pax(elf_ex);
46331+#endif
46332+
46333+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46334+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46335+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46336+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46337+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46338+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46339+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46340+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46341+ return -EINVAL;
46342+
46343+#ifdef CONFIG_PAX_SOFTMODE
46344+ if (pax_softmode)
46345+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
46346+ else
46347+#endif
46348+
46349+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
46350+ found_flags = 1;
46351+ break;
46352+ }
46353+#endif
46354+
46355+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
46356+ if (found_flags == 0) {
46357+ struct elf_phdr phdr;
46358+ memset(&phdr, 0, sizeof(phdr));
46359+ phdr.p_flags = PF_NOEMUTRAMP;
46360+#ifdef CONFIG_PAX_SOFTMODE
46361+ if (pax_softmode)
46362+ pax_flags = pax_parse_softmode(&phdr);
46363+ else
46364+#endif
46365+ pax_flags = pax_parse_hardmode(&phdr);
46366+ }
46367+#endif
46368+
46369+
46370+ if (0 > pax_check_flags(&pax_flags))
46371+ return -EINVAL;
46372+
46373+ current->mm->pax_flags = pax_flags;
46374+ return 0;
46375+}
46376+#endif
46377+
46378 /*
46379 * These are the functions used to load ELF style executables and shared
46380 * libraries. There is no binary dependent code anywhere else.
46381@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46382 {
46383 unsigned int random_variable = 0;
46384
46385+#ifdef CONFIG_PAX_RANDUSTACK
46386+ if (randomize_va_space)
46387+ return stack_top - current->mm->delta_stack;
46388+#endif
46389+
46390 if ((current->flags & PF_RANDOMIZE) &&
46391 !(current->personality & ADDR_NO_RANDOMIZE)) {
46392 random_variable = get_random_int() & STACK_RND_MASK;
46393@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46394 unsigned long load_addr = 0, load_bias = 0;
46395 int load_addr_set = 0;
46396 char * elf_interpreter = NULL;
46397- unsigned long error;
46398+ unsigned long error = 0;
46399 struct elf_phdr *elf_ppnt, *elf_phdata;
46400 unsigned long elf_bss, elf_brk;
46401 int retval, i;
46402@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46403 unsigned long start_code, end_code, start_data, end_data;
46404 unsigned long reloc_func_desc = 0;
46405 int executable_stack = EXSTACK_DEFAULT;
46406- unsigned long def_flags = 0;
46407 struct {
46408 struct elfhdr elf_ex;
46409 struct elfhdr interp_elf_ex;
46410 } *loc;
46411+ unsigned long pax_task_size = TASK_SIZE;
46412
46413 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46414 if (!loc) {
46415@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46416
46417 /* OK, This is the point of no return */
46418 current->flags &= ~PF_FORKNOEXEC;
46419- current->mm->def_flags = def_flags;
46420+
46421+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46422+ current->mm->pax_flags = 0UL;
46423+#endif
46424+
46425+#ifdef CONFIG_PAX_DLRESOLVE
46426+ current->mm->call_dl_resolve = 0UL;
46427+#endif
46428+
46429+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46430+ current->mm->call_syscall = 0UL;
46431+#endif
46432+
46433+#ifdef CONFIG_PAX_ASLR
46434+ current->mm->delta_mmap = 0UL;
46435+ current->mm->delta_stack = 0UL;
46436+#endif
46437+
46438+ current->mm->def_flags = 0;
46439+
46440+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
46441+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
46442+ send_sig(SIGKILL, current, 0);
46443+ goto out_free_dentry;
46444+ }
46445+#endif
46446+
46447+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46448+ pax_set_initial_flags(bprm);
46449+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46450+ if (pax_set_initial_flags_func)
46451+ (pax_set_initial_flags_func)(bprm);
46452+#endif
46453+
46454+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46455+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
46456+ current->mm->context.user_cs_limit = PAGE_SIZE;
46457+ current->mm->def_flags |= VM_PAGEEXEC;
46458+ }
46459+#endif
46460+
46461+#ifdef CONFIG_PAX_SEGMEXEC
46462+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
46463+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
46464+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
46465+ pax_task_size = SEGMEXEC_TASK_SIZE;
46466+ }
46467+#endif
46468+
46469+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
46470+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46471+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
46472+ put_cpu();
46473+ }
46474+#endif
46475
46476 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
46477 may depend on the personality. */
46478 SET_PERSONALITY(loc->elf_ex);
46479+
46480+#ifdef CONFIG_PAX_ASLR
46481+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
46482+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
46483+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
46484+ }
46485+#endif
46486+
46487+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46488+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46489+ executable_stack = EXSTACK_DISABLE_X;
46490+ current->personality &= ~READ_IMPLIES_EXEC;
46491+ } else
46492+#endif
46493+
46494 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
46495 current->personality |= READ_IMPLIES_EXEC;
46496
46497@@ -800,10 +1087,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46498 * might try to exec. This is because the brk will
46499 * follow the loader, and is not movable. */
46500 #ifdef CONFIG_X86
46501- load_bias = 0;
46502+ if (current->flags & PF_RANDOMIZE)
46503+ load_bias = 0;
46504+ else
46505+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46506 #else
46507 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46508 #endif
46509+
46510+#ifdef CONFIG_PAX_RANDMMAP
46511+ /* PaX: randomize base address at the default exe base if requested */
46512+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
46513+#ifdef CONFIG_SPARC64
46514+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
46515+#else
46516+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
46517+#endif
46518+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
46519+ elf_flags |= MAP_FIXED;
46520+ }
46521+#endif
46522+
46523 }
46524
46525 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
46526@@ -836,9 +1140,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46527 * allowed task size. Note that p_filesz must always be
46528 * <= p_memsz so it is only necessary to check p_memsz.
46529 */
46530- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46531- elf_ppnt->p_memsz > TASK_SIZE ||
46532- TASK_SIZE - elf_ppnt->p_memsz < k) {
46533+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46534+ elf_ppnt->p_memsz > pax_task_size ||
46535+ pax_task_size - elf_ppnt->p_memsz < k) {
46536 /* set_brk can never work. Avoid overflows. */
46537 send_sig(SIGKILL, current, 0);
46538 retval = -EINVAL;
46539@@ -866,6 +1170,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46540 start_data += load_bias;
46541 end_data += load_bias;
46542
46543+#ifdef CONFIG_PAX_RANDMMAP
46544+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
46545+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
46546+#endif
46547+
46548 /* Calling set_brk effectively mmaps the pages that we need
46549 * for the bss and break sections. We must do this before
46550 * mapping in the interpreter, to make sure it doesn't wind
46551@@ -877,9 +1186,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46552 goto out_free_dentry;
46553 }
46554 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
46555- send_sig(SIGSEGV, current, 0);
46556- retval = -EFAULT; /* Nobody gets to see this, but.. */
46557- goto out_free_dentry;
46558+ /*
46559+ * This bss-zeroing can fail if the ELF
46560+ * file specifies odd protections. So
46561+ * we don't check the return value
46562+ */
46563 }
46564
46565 if (elf_interpreter) {
46566@@ -1112,8 +1423,10 @@ static int dump_seek(struct file *file, loff_t off)
46567 unsigned long n = off;
46568 if (n > PAGE_SIZE)
46569 n = PAGE_SIZE;
46570- if (!dump_write(file, buf, n))
46571+ if (!dump_write(file, buf, n)) {
46572+ free_page((unsigned long)buf);
46573 return 0;
46574+ }
46575 off -= n;
46576 }
46577 free_page((unsigned long)buf);
46578@@ -1125,7 +1438,7 @@ static int dump_seek(struct file *file, loff_t off)
46579 * Decide what to dump of a segment, part, all or none.
46580 */
46581 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46582- unsigned long mm_flags)
46583+ unsigned long mm_flags, long signr)
46584 {
46585 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46586
46587@@ -1159,7 +1472,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46588 if (vma->vm_file == NULL)
46589 return 0;
46590
46591- if (FILTER(MAPPED_PRIVATE))
46592+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
46593 goto whole;
46594
46595 /*
46596@@ -1255,8 +1568,11 @@ static int writenote(struct memelfnote *men, struct file *file,
46597 #undef DUMP_WRITE
46598
46599 #define DUMP_WRITE(addr, nr) \
46600+ do { \
46601+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
46602 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
46603- goto end_coredump;
46604+ goto end_coredump; \
46605+ } while (0);
46606
46607 static void fill_elf_header(struct elfhdr *elf, int segs,
46608 u16 machine, u32 flags, u8 osabi)
46609@@ -1385,9 +1701,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
46610 {
46611 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
46612 int i = 0;
46613- do
46614+ do {
46615 i += 2;
46616- while (auxv[i - 2] != AT_NULL);
46617+ } while (auxv[i - 2] != AT_NULL);
46618 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
46619 }
46620
46621@@ -1973,7 +2289,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46622 phdr.p_offset = offset;
46623 phdr.p_vaddr = vma->vm_start;
46624 phdr.p_paddr = 0;
46625- phdr.p_filesz = vma_dump_size(vma, mm_flags);
46626+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
46627 phdr.p_memsz = vma->vm_end - vma->vm_start;
46628 offset += phdr.p_filesz;
46629 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
46630@@ -2006,7 +2322,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46631 unsigned long addr;
46632 unsigned long end;
46633
46634- end = vma->vm_start + vma_dump_size(vma, mm_flags);
46635+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
46636
46637 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
46638 struct page *page;
46639@@ -2015,6 +2331,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46640 page = get_dump_page(addr);
46641 if (page) {
46642 void *kaddr = kmap(page);
46643+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
46644 stop = ((size += PAGE_SIZE) > limit) ||
46645 !dump_write(file, kaddr, PAGE_SIZE);
46646 kunmap(page);
46647@@ -2042,6 +2359,97 @@ out:
46648
46649 #endif /* USE_ELF_CORE_DUMP */
46650
46651+#ifdef CONFIG_PAX_MPROTECT
46652+/* PaX: non-PIC ELF libraries need relocations on their executable segments
46653+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
46654+ * we'll remove VM_MAYWRITE for good on RELRO segments.
46655+ *
46656+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
46657+ * basis because we want to allow the common case and not the special ones.
46658+ */
46659+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
46660+{
46661+ struct elfhdr elf_h;
46662+ struct elf_phdr elf_p;
46663+ unsigned long i;
46664+ unsigned long oldflags;
46665+ bool is_textrel_rw, is_textrel_rx, is_relro;
46666+
46667+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
46668+ return;
46669+
46670+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
46671+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
46672+
46673+#ifdef CONFIG_PAX_ELFRELOCS
46674+ /* possible TEXTREL */
46675+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
46676+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
46677+#else
46678+ is_textrel_rw = false;
46679+ is_textrel_rx = false;
46680+#endif
46681+
46682+ /* possible RELRO */
46683+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
46684+
46685+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
46686+ return;
46687+
46688+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
46689+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
46690+
46691+#ifdef CONFIG_PAX_ETEXECRELOCS
46692+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46693+#else
46694+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
46695+#endif
46696+
46697+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46698+ !elf_check_arch(&elf_h) ||
46699+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
46700+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
46701+ return;
46702+
46703+ for (i = 0UL; i < elf_h.e_phnum; i++) {
46704+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
46705+ return;
46706+ switch (elf_p.p_type) {
46707+ case PT_DYNAMIC:
46708+ if (!is_textrel_rw && !is_textrel_rx)
46709+ continue;
46710+ i = 0UL;
46711+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
46712+ elf_dyn dyn;
46713+
46714+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
46715+ return;
46716+ if (dyn.d_tag == DT_NULL)
46717+ return;
46718+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
46719+ gr_log_textrel(vma);
46720+ if (is_textrel_rw)
46721+ vma->vm_flags |= VM_MAYWRITE;
46722+ else
46723+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
46724+ vma->vm_flags &= ~VM_MAYWRITE;
46725+ return;
46726+ }
46727+ i++;
46728+ }
46729+ return;
46730+
46731+ case PT_GNU_RELRO:
46732+ if (!is_relro)
46733+ continue;
46734+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
46735+ vma->vm_flags &= ~VM_MAYWRITE;
46736+ return;
46737+ }
46738+ }
46739+}
46740+#endif
46741+
46742 static int __init init_elf_binfmt(void)
46743 {
46744 return register_binfmt(&elf_format);
46745diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
46746index ca88c46..f155a60 100644
46747--- a/fs/binfmt_flat.c
46748+++ b/fs/binfmt_flat.c
46749@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
46750 realdatastart = (unsigned long) -ENOMEM;
46751 printk("Unable to allocate RAM for process data, errno %d\n",
46752 (int)-realdatastart);
46753+ down_write(&current->mm->mmap_sem);
46754 do_munmap(current->mm, textpos, text_len);
46755+ up_write(&current->mm->mmap_sem);
46756 ret = realdatastart;
46757 goto err;
46758 }
46759@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46760 }
46761 if (IS_ERR_VALUE(result)) {
46762 printk("Unable to read data+bss, errno %d\n", (int)-result);
46763+ down_write(&current->mm->mmap_sem);
46764 do_munmap(current->mm, textpos, text_len);
46765 do_munmap(current->mm, realdatastart, data_len + extra);
46766+ up_write(&current->mm->mmap_sem);
46767 ret = result;
46768 goto err;
46769 }
46770@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46771 }
46772 if (IS_ERR_VALUE(result)) {
46773 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
46774+ down_write(&current->mm->mmap_sem);
46775 do_munmap(current->mm, textpos, text_len + data_len + extra +
46776 MAX_SHARED_LIBS * sizeof(unsigned long));
46777+ up_write(&current->mm->mmap_sem);
46778 ret = result;
46779 goto err;
46780 }
46781diff --git a/fs/bio.c b/fs/bio.c
46782index e696713..83de133 100644
46783--- a/fs/bio.c
46784+++ b/fs/bio.c
46785@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
46786
46787 i = 0;
46788 while (i < bio_slab_nr) {
46789- struct bio_slab *bslab = &bio_slabs[i];
46790+ bslab = &bio_slabs[i];
46791
46792 if (!bslab->slab && entry == -1)
46793 entry = i;
46794@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
46795 const int read = bio_data_dir(bio) == READ;
46796 struct bio_map_data *bmd = bio->bi_private;
46797 int i;
46798- char *p = bmd->sgvecs[0].iov_base;
46799+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
46800
46801 __bio_for_each_segment(bvec, bio, i, 0) {
46802 char *addr = page_address(bvec->bv_page);
46803diff --git a/fs/block_dev.c b/fs/block_dev.c
46804index e65efa2..04fae57 100644
46805--- a/fs/block_dev.c
46806+++ b/fs/block_dev.c
46807@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
46808 else if (bdev->bd_contains == bdev)
46809 res = 0; /* is a whole device which isn't held */
46810
46811- else if (bdev->bd_contains->bd_holder == bd_claim)
46812+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
46813 res = 0; /* is a partition of a device that is being partitioned */
46814 else if (bdev->bd_contains->bd_holder != NULL)
46815 res = -EBUSY; /* is a partition of a held device */
46816diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
46817index c4bc570..42acd8d 100644
46818--- a/fs/btrfs/ctree.c
46819+++ b/fs/btrfs/ctree.c
46820@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
46821 free_extent_buffer(buf);
46822 add_root_to_dirty_list(root);
46823 } else {
46824- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
46825- parent_start = parent->start;
46826- else
46827+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
46828+ if (parent)
46829+ parent_start = parent->start;
46830+ else
46831+ parent_start = 0;
46832+ } else
46833 parent_start = 0;
46834
46835 WARN_ON(trans->transid != btrfs_header_generation(parent));
46836@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
46837
46838 ret = 0;
46839 if (slot == 0) {
46840- struct btrfs_disk_key disk_key;
46841 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
46842 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
46843 }
46844diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
46845index f447188..59c17c5 100644
46846--- a/fs/btrfs/disk-io.c
46847+++ b/fs/btrfs/disk-io.c
46848@@ -39,7 +39,7 @@
46849 #include "tree-log.h"
46850 #include "free-space-cache.h"
46851
46852-static struct extent_io_ops btree_extent_io_ops;
46853+static const struct extent_io_ops btree_extent_io_ops;
46854 static void end_workqueue_fn(struct btrfs_work *work);
46855 static void free_fs_root(struct btrfs_root *root);
46856
46857@@ -2607,7 +2607,7 @@ out:
46858 return 0;
46859 }
46860
46861-static struct extent_io_ops btree_extent_io_ops = {
46862+static const struct extent_io_ops btree_extent_io_ops = {
46863 .write_cache_pages_lock_hook = btree_lock_page_hook,
46864 .readpage_end_io_hook = btree_readpage_end_io_hook,
46865 .submit_bio_hook = btree_submit_bio_hook,
46866diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
46867index 559f724..a026171 100644
46868--- a/fs/btrfs/extent-tree.c
46869+++ b/fs/btrfs/extent-tree.c
46870@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
46871 u64 group_start = group->key.objectid;
46872 new_extents = kmalloc(sizeof(*new_extents),
46873 GFP_NOFS);
46874+ if (!new_extents) {
46875+ ret = -ENOMEM;
46876+ goto out;
46877+ }
46878 nr_extents = 1;
46879 ret = get_new_locations(reloc_inode,
46880 extent_key,
46881diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
46882index 36de250..7ec75c7 100644
46883--- a/fs/btrfs/extent_io.h
46884+++ b/fs/btrfs/extent_io.h
46885@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
46886 struct bio *bio, int mirror_num,
46887 unsigned long bio_flags);
46888 struct extent_io_ops {
46889- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
46890+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
46891 u64 start, u64 end, int *page_started,
46892 unsigned long *nr_written);
46893- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
46894- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
46895+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
46896+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
46897 extent_submit_bio_hook_t *submit_bio_hook;
46898- int (*merge_bio_hook)(struct page *page, unsigned long offset,
46899+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
46900 size_t size, struct bio *bio,
46901 unsigned long bio_flags);
46902- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
46903- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
46904+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
46905+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
46906 u64 start, u64 end,
46907 struct extent_state *state);
46908- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
46909+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
46910 u64 start, u64 end,
46911 struct extent_state *state);
46912- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
46913+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
46914 struct extent_state *state);
46915- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
46916+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
46917 struct extent_state *state, int uptodate);
46918- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
46919+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
46920 unsigned long old, unsigned long bits);
46921- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
46922+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
46923 unsigned long bits);
46924- int (*merge_extent_hook)(struct inode *inode,
46925+ int (* const merge_extent_hook)(struct inode *inode,
46926 struct extent_state *new,
46927 struct extent_state *other);
46928- int (*split_extent_hook)(struct inode *inode,
46929+ int (* const split_extent_hook)(struct inode *inode,
46930 struct extent_state *orig, u64 split);
46931- int (*write_cache_pages_lock_hook)(struct page *page);
46932+ int (* const write_cache_pages_lock_hook)(struct page *page);
46933 };
46934
46935 struct extent_io_tree {
46936@@ -88,7 +88,7 @@ struct extent_io_tree {
46937 u64 dirty_bytes;
46938 spinlock_t lock;
46939 spinlock_t buffer_lock;
46940- struct extent_io_ops *ops;
46941+ const struct extent_io_ops *ops;
46942 };
46943
46944 struct extent_state {
46945diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
46946index cb2849f..3718fb4 100644
46947--- a/fs/btrfs/free-space-cache.c
46948+++ b/fs/btrfs/free-space-cache.c
46949@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
46950
46951 while(1) {
46952 if (entry->bytes < bytes || entry->offset < min_start) {
46953- struct rb_node *node;
46954-
46955 node = rb_next(&entry->offset_index);
46956 if (!node)
46957 break;
46958@@ -1226,7 +1224,7 @@ again:
46959 */
46960 while (entry->bitmap || found_bitmap ||
46961 (!entry->bitmap && entry->bytes < min_bytes)) {
46962- struct rb_node *node = rb_next(&entry->offset_index);
46963+ node = rb_next(&entry->offset_index);
46964
46965 if (entry->bitmap && entry->bytes > bytes + empty_size) {
46966 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
46967diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
46968index e03a836..323837e 100644
46969--- a/fs/btrfs/inode.c
46970+++ b/fs/btrfs/inode.c
46971@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
46972 static const struct address_space_operations btrfs_aops;
46973 static const struct address_space_operations btrfs_symlink_aops;
46974 static const struct file_operations btrfs_dir_file_operations;
46975-static struct extent_io_ops btrfs_extent_io_ops;
46976+static const struct extent_io_ops btrfs_extent_io_ops;
46977
46978 static struct kmem_cache *btrfs_inode_cachep;
46979 struct kmem_cache *btrfs_trans_handle_cachep;
46980@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
46981 1, 0, NULL, GFP_NOFS);
46982 while (start < end) {
46983 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
46984+ BUG_ON(!async_cow);
46985 async_cow->inode = inode;
46986 async_cow->root = root;
46987 async_cow->locked_page = locked_page;
46988@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
46989 inline_size = btrfs_file_extent_inline_item_len(leaf,
46990 btrfs_item_nr(leaf, path->slots[0]));
46991 tmp = kmalloc(inline_size, GFP_NOFS);
46992+ if (!tmp)
46993+ return -ENOMEM;
46994 ptr = btrfs_file_extent_inline_start(item);
46995
46996 read_extent_buffer(leaf, tmp, ptr, inline_size);
46997@@ -5410,7 +5413,7 @@ fail:
46998 return -ENOMEM;
46999 }
47000
47001-static int btrfs_getattr(struct vfsmount *mnt,
47002+int btrfs_getattr(struct vfsmount *mnt,
47003 struct dentry *dentry, struct kstat *stat)
47004 {
47005 struct inode *inode = dentry->d_inode;
47006@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47007 return 0;
47008 }
47009
47010+EXPORT_SYMBOL(btrfs_getattr);
47011+
47012+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47013+{
47014+ return BTRFS_I(inode)->root->anon_super.s_dev;
47015+}
47016+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47017+
47018 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47019 struct inode *new_dir, struct dentry *new_dentry)
47020 {
47021@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47022 .fsync = btrfs_sync_file,
47023 };
47024
47025-static struct extent_io_ops btrfs_extent_io_ops = {
47026+static const struct extent_io_ops btrfs_extent_io_ops = {
47027 .fill_delalloc = run_delalloc_range,
47028 .submit_bio_hook = btrfs_submit_bio_hook,
47029 .merge_bio_hook = btrfs_merge_bio_hook,
47030diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47031index ab7ab53..94e0781 100644
47032--- a/fs/btrfs/relocation.c
47033+++ b/fs/btrfs/relocation.c
47034@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47035 }
47036 spin_unlock(&rc->reloc_root_tree.lock);
47037
47038- BUG_ON((struct btrfs_root *)node->data != root);
47039+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47040
47041 if (!del) {
47042 spin_lock(&rc->reloc_root_tree.lock);
47043diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47044index a240b6f..4ce16ef 100644
47045--- a/fs/btrfs/sysfs.c
47046+++ b/fs/btrfs/sysfs.c
47047@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47048 complete(&root->kobj_unregister);
47049 }
47050
47051-static struct sysfs_ops btrfs_super_attr_ops = {
47052+static const struct sysfs_ops btrfs_super_attr_ops = {
47053 .show = btrfs_super_attr_show,
47054 .store = btrfs_super_attr_store,
47055 };
47056
47057-static struct sysfs_ops btrfs_root_attr_ops = {
47058+static const struct sysfs_ops btrfs_root_attr_ops = {
47059 .show = btrfs_root_attr_show,
47060 .store = btrfs_root_attr_store,
47061 };
47062diff --git a/fs/buffer.c b/fs/buffer.c
47063index 6fa5302..395d9f6 100644
47064--- a/fs/buffer.c
47065+++ b/fs/buffer.c
47066@@ -25,6 +25,7 @@
47067 #include <linux/percpu.h>
47068 #include <linux/slab.h>
47069 #include <linux/capability.h>
47070+#include <linux/security.h>
47071 #include <linux/blkdev.h>
47072 #include <linux/file.h>
47073 #include <linux/quotaops.h>
47074diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47075index 3797e00..ce776f6 100644
47076--- a/fs/cachefiles/bind.c
47077+++ b/fs/cachefiles/bind.c
47078@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47079 args);
47080
47081 /* start by checking things over */
47082- ASSERT(cache->fstop_percent >= 0 &&
47083- cache->fstop_percent < cache->fcull_percent &&
47084+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47085 cache->fcull_percent < cache->frun_percent &&
47086 cache->frun_percent < 100);
47087
47088- ASSERT(cache->bstop_percent >= 0 &&
47089- cache->bstop_percent < cache->bcull_percent &&
47090+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47091 cache->bcull_percent < cache->brun_percent &&
47092 cache->brun_percent < 100);
47093
47094diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47095index 4618516..bb30d01 100644
47096--- a/fs/cachefiles/daemon.c
47097+++ b/fs/cachefiles/daemon.c
47098@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47099 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47100 return -EIO;
47101
47102- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47103+ if (datalen > PAGE_SIZE - 1)
47104 return -EOPNOTSUPP;
47105
47106 /* drag the command string into the kernel so we can parse it */
47107@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47108 if (args[0] != '%' || args[1] != '\0')
47109 return -EINVAL;
47110
47111- if (fstop < 0 || fstop >= cache->fcull_percent)
47112+ if (fstop >= cache->fcull_percent)
47113 return cachefiles_daemon_range_error(cache, args);
47114
47115 cache->fstop_percent = fstop;
47116@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47117 if (args[0] != '%' || args[1] != '\0')
47118 return -EINVAL;
47119
47120- if (bstop < 0 || bstop >= cache->bcull_percent)
47121+ if (bstop >= cache->bcull_percent)
47122 return cachefiles_daemon_range_error(cache, args);
47123
47124 cache->bstop_percent = bstop;
47125diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47126index f7c255f..fcd61de 100644
47127--- a/fs/cachefiles/internal.h
47128+++ b/fs/cachefiles/internal.h
47129@@ -56,7 +56,7 @@ struct cachefiles_cache {
47130 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47131 struct rb_root active_nodes; /* active nodes (can't be culled) */
47132 rwlock_t active_lock; /* lock for active_nodes */
47133- atomic_t gravecounter; /* graveyard uniquifier */
47134+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47135 unsigned frun_percent; /* when to stop culling (% files) */
47136 unsigned fcull_percent; /* when to start culling (% files) */
47137 unsigned fstop_percent; /* when to stop allocating (% files) */
47138@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47139 * proc.c
47140 */
47141 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47142-extern atomic_t cachefiles_lookup_histogram[HZ];
47143-extern atomic_t cachefiles_mkdir_histogram[HZ];
47144-extern atomic_t cachefiles_create_histogram[HZ];
47145+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47146+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47147+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47148
47149 extern int __init cachefiles_proc_init(void);
47150 extern void cachefiles_proc_cleanup(void);
47151 static inline
47152-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47153+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47154 {
47155 unsigned long jif = jiffies - start_jif;
47156 if (jif >= HZ)
47157 jif = HZ - 1;
47158- atomic_inc(&histogram[jif]);
47159+ atomic_inc_unchecked(&histogram[jif]);
47160 }
47161
47162 #else
47163diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47164index 14ac480..a62766c 100644
47165--- a/fs/cachefiles/namei.c
47166+++ b/fs/cachefiles/namei.c
47167@@ -250,7 +250,7 @@ try_again:
47168 /* first step is to make up a grave dentry in the graveyard */
47169 sprintf(nbuffer, "%08x%08x",
47170 (uint32_t) get_seconds(),
47171- (uint32_t) atomic_inc_return(&cache->gravecounter));
47172+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47173
47174 /* do the multiway lock magic */
47175 trap = lock_rename(cache->graveyard, dir);
47176diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47177index eccd339..4c1d995 100644
47178--- a/fs/cachefiles/proc.c
47179+++ b/fs/cachefiles/proc.c
47180@@ -14,9 +14,9 @@
47181 #include <linux/seq_file.h>
47182 #include "internal.h"
47183
47184-atomic_t cachefiles_lookup_histogram[HZ];
47185-atomic_t cachefiles_mkdir_histogram[HZ];
47186-atomic_t cachefiles_create_histogram[HZ];
47187+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47188+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47189+atomic_unchecked_t cachefiles_create_histogram[HZ];
47190
47191 /*
47192 * display the latency histogram
47193@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47194 return 0;
47195 default:
47196 index = (unsigned long) v - 3;
47197- x = atomic_read(&cachefiles_lookup_histogram[index]);
47198- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47199- z = atomic_read(&cachefiles_create_histogram[index]);
47200+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47201+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47202+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47203 if (x == 0 && y == 0 && z == 0)
47204 return 0;
47205
47206diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47207index a6c8c6f..5cf8517 100644
47208--- a/fs/cachefiles/rdwr.c
47209+++ b/fs/cachefiles/rdwr.c
47210@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47211 old_fs = get_fs();
47212 set_fs(KERNEL_DS);
47213 ret = file->f_op->write(
47214- file, (const void __user *) data, len, &pos);
47215+ file, (const void __force_user *) data, len, &pos);
47216 set_fs(old_fs);
47217 kunmap(page);
47218 if (ret != len)
47219diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47220index 42cec2a..2aba466 100644
47221--- a/fs/cifs/cifs_debug.c
47222+++ b/fs/cifs/cifs_debug.c
47223@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47224 tcon = list_entry(tmp3,
47225 struct cifsTconInfo,
47226 tcon_list);
47227- atomic_set(&tcon->num_smbs_sent, 0);
47228- atomic_set(&tcon->num_writes, 0);
47229- atomic_set(&tcon->num_reads, 0);
47230- atomic_set(&tcon->num_oplock_brks, 0);
47231- atomic_set(&tcon->num_opens, 0);
47232- atomic_set(&tcon->num_posixopens, 0);
47233- atomic_set(&tcon->num_posixmkdirs, 0);
47234- atomic_set(&tcon->num_closes, 0);
47235- atomic_set(&tcon->num_deletes, 0);
47236- atomic_set(&tcon->num_mkdirs, 0);
47237- atomic_set(&tcon->num_rmdirs, 0);
47238- atomic_set(&tcon->num_renames, 0);
47239- atomic_set(&tcon->num_t2renames, 0);
47240- atomic_set(&tcon->num_ffirst, 0);
47241- atomic_set(&tcon->num_fnext, 0);
47242- atomic_set(&tcon->num_fclose, 0);
47243- atomic_set(&tcon->num_hardlinks, 0);
47244- atomic_set(&tcon->num_symlinks, 0);
47245- atomic_set(&tcon->num_locks, 0);
47246+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47247+ atomic_set_unchecked(&tcon->num_writes, 0);
47248+ atomic_set_unchecked(&tcon->num_reads, 0);
47249+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47250+ atomic_set_unchecked(&tcon->num_opens, 0);
47251+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47252+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47253+ atomic_set_unchecked(&tcon->num_closes, 0);
47254+ atomic_set_unchecked(&tcon->num_deletes, 0);
47255+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47256+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47257+ atomic_set_unchecked(&tcon->num_renames, 0);
47258+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47259+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47260+ atomic_set_unchecked(&tcon->num_fnext, 0);
47261+ atomic_set_unchecked(&tcon->num_fclose, 0);
47262+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47263+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47264+ atomic_set_unchecked(&tcon->num_locks, 0);
47265 }
47266 }
47267 }
47268@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47269 if (tcon->need_reconnect)
47270 seq_puts(m, "\tDISCONNECTED ");
47271 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47272- atomic_read(&tcon->num_smbs_sent),
47273- atomic_read(&tcon->num_oplock_brks));
47274+ atomic_read_unchecked(&tcon->num_smbs_sent),
47275+ atomic_read_unchecked(&tcon->num_oplock_brks));
47276 seq_printf(m, "\nReads: %d Bytes: %lld",
47277- atomic_read(&tcon->num_reads),
47278+ atomic_read_unchecked(&tcon->num_reads),
47279 (long long)(tcon->bytes_read));
47280 seq_printf(m, "\nWrites: %d Bytes: %lld",
47281- atomic_read(&tcon->num_writes),
47282+ atomic_read_unchecked(&tcon->num_writes),
47283 (long long)(tcon->bytes_written));
47284 seq_printf(m, "\nFlushes: %d",
47285- atomic_read(&tcon->num_flushes));
47286+ atomic_read_unchecked(&tcon->num_flushes));
47287 seq_printf(m, "\nLocks: %d HardLinks: %d "
47288 "Symlinks: %d",
47289- atomic_read(&tcon->num_locks),
47290- atomic_read(&tcon->num_hardlinks),
47291- atomic_read(&tcon->num_symlinks));
47292+ atomic_read_unchecked(&tcon->num_locks),
47293+ atomic_read_unchecked(&tcon->num_hardlinks),
47294+ atomic_read_unchecked(&tcon->num_symlinks));
47295 seq_printf(m, "\nOpens: %d Closes: %d "
47296 "Deletes: %d",
47297- atomic_read(&tcon->num_opens),
47298- atomic_read(&tcon->num_closes),
47299- atomic_read(&tcon->num_deletes));
47300+ atomic_read_unchecked(&tcon->num_opens),
47301+ atomic_read_unchecked(&tcon->num_closes),
47302+ atomic_read_unchecked(&tcon->num_deletes));
47303 seq_printf(m, "\nPosix Opens: %d "
47304 "Posix Mkdirs: %d",
47305- atomic_read(&tcon->num_posixopens),
47306- atomic_read(&tcon->num_posixmkdirs));
47307+ atomic_read_unchecked(&tcon->num_posixopens),
47308+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47309 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47310- atomic_read(&tcon->num_mkdirs),
47311- atomic_read(&tcon->num_rmdirs));
47312+ atomic_read_unchecked(&tcon->num_mkdirs),
47313+ atomic_read_unchecked(&tcon->num_rmdirs));
47314 seq_printf(m, "\nRenames: %d T2 Renames %d",
47315- atomic_read(&tcon->num_renames),
47316- atomic_read(&tcon->num_t2renames));
47317+ atomic_read_unchecked(&tcon->num_renames),
47318+ atomic_read_unchecked(&tcon->num_t2renames));
47319 seq_printf(m, "\nFindFirst: %d FNext %d "
47320 "FClose %d",
47321- atomic_read(&tcon->num_ffirst),
47322- atomic_read(&tcon->num_fnext),
47323- atomic_read(&tcon->num_fclose));
47324+ atomic_read_unchecked(&tcon->num_ffirst),
47325+ atomic_read_unchecked(&tcon->num_fnext),
47326+ atomic_read_unchecked(&tcon->num_fclose));
47327 }
47328 }
47329 }
47330diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47331index 1445407..68cb0dc 100644
47332--- a/fs/cifs/cifsfs.c
47333+++ b/fs/cifs/cifsfs.c
47334@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47335 cifs_req_cachep = kmem_cache_create("cifs_request",
47336 CIFSMaxBufSize +
47337 MAX_CIFS_HDR_SIZE, 0,
47338- SLAB_HWCACHE_ALIGN, NULL);
47339+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47340 if (cifs_req_cachep == NULL)
47341 return -ENOMEM;
47342
47343@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47344 efficient to alloc 1 per page off the slab compared to 17K (5page)
47345 alloc of large cifs buffers even when page debugging is on */
47346 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47347- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47348+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47349 NULL);
47350 if (cifs_sm_req_cachep == NULL) {
47351 mempool_destroy(cifs_req_poolp);
47352@@ -991,8 +991,8 @@ init_cifs(void)
47353 atomic_set(&bufAllocCount, 0);
47354 atomic_set(&smBufAllocCount, 0);
47355 #ifdef CONFIG_CIFS_STATS2
47356- atomic_set(&totBufAllocCount, 0);
47357- atomic_set(&totSmBufAllocCount, 0);
47358+ atomic_set_unchecked(&totBufAllocCount, 0);
47359+ atomic_set_unchecked(&totSmBufAllocCount, 0);
47360 #endif /* CONFIG_CIFS_STATS2 */
47361
47362 atomic_set(&midCount, 0);
47363diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47364index e29581e..1c22bab 100644
47365--- a/fs/cifs/cifsglob.h
47366+++ b/fs/cifs/cifsglob.h
47367@@ -252,28 +252,28 @@ struct cifsTconInfo {
47368 __u16 Flags; /* optional support bits */
47369 enum statusEnum tidStatus;
47370 #ifdef CONFIG_CIFS_STATS
47371- atomic_t num_smbs_sent;
47372- atomic_t num_writes;
47373- atomic_t num_reads;
47374- atomic_t num_flushes;
47375- atomic_t num_oplock_brks;
47376- atomic_t num_opens;
47377- atomic_t num_closes;
47378- atomic_t num_deletes;
47379- atomic_t num_mkdirs;
47380- atomic_t num_posixopens;
47381- atomic_t num_posixmkdirs;
47382- atomic_t num_rmdirs;
47383- atomic_t num_renames;
47384- atomic_t num_t2renames;
47385- atomic_t num_ffirst;
47386- atomic_t num_fnext;
47387- atomic_t num_fclose;
47388- atomic_t num_hardlinks;
47389- atomic_t num_symlinks;
47390- atomic_t num_locks;
47391- atomic_t num_acl_get;
47392- atomic_t num_acl_set;
47393+ atomic_unchecked_t num_smbs_sent;
47394+ atomic_unchecked_t num_writes;
47395+ atomic_unchecked_t num_reads;
47396+ atomic_unchecked_t num_flushes;
47397+ atomic_unchecked_t num_oplock_brks;
47398+ atomic_unchecked_t num_opens;
47399+ atomic_unchecked_t num_closes;
47400+ atomic_unchecked_t num_deletes;
47401+ atomic_unchecked_t num_mkdirs;
47402+ atomic_unchecked_t num_posixopens;
47403+ atomic_unchecked_t num_posixmkdirs;
47404+ atomic_unchecked_t num_rmdirs;
47405+ atomic_unchecked_t num_renames;
47406+ atomic_unchecked_t num_t2renames;
47407+ atomic_unchecked_t num_ffirst;
47408+ atomic_unchecked_t num_fnext;
47409+ atomic_unchecked_t num_fclose;
47410+ atomic_unchecked_t num_hardlinks;
47411+ atomic_unchecked_t num_symlinks;
47412+ atomic_unchecked_t num_locks;
47413+ atomic_unchecked_t num_acl_get;
47414+ atomic_unchecked_t num_acl_set;
47415 #ifdef CONFIG_CIFS_STATS2
47416 unsigned long long time_writes;
47417 unsigned long long time_reads;
47418@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47419 }
47420
47421 #ifdef CONFIG_CIFS_STATS
47422-#define cifs_stats_inc atomic_inc
47423+#define cifs_stats_inc atomic_inc_unchecked
47424
47425 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47426 unsigned int bytes)
47427@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47428 /* Various Debug counters */
47429 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47430 #ifdef CONFIG_CIFS_STATS2
47431-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47432-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47433+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47434+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47435 #endif
47436 GLOBAL_EXTERN atomic_t smBufAllocCount;
47437 GLOBAL_EXTERN atomic_t midCount;
47438diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47439index fc1e048..28b3441 100644
47440--- a/fs/cifs/link.c
47441+++ b/fs/cifs/link.c
47442@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47443
47444 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
47445 {
47446- char *p = nd_get_link(nd);
47447+ const char *p = nd_get_link(nd);
47448 if (!IS_ERR(p))
47449 kfree(p);
47450 }
47451diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
47452index d27d4ec..8d0a444 100644
47453--- a/fs/cifs/misc.c
47454+++ b/fs/cifs/misc.c
47455@@ -155,7 +155,7 @@ cifs_buf_get(void)
47456 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
47457 atomic_inc(&bufAllocCount);
47458 #ifdef CONFIG_CIFS_STATS2
47459- atomic_inc(&totBufAllocCount);
47460+ atomic_inc_unchecked(&totBufAllocCount);
47461 #endif /* CONFIG_CIFS_STATS2 */
47462 }
47463
47464@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
47465 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
47466 atomic_inc(&smBufAllocCount);
47467 #ifdef CONFIG_CIFS_STATS2
47468- atomic_inc(&totSmBufAllocCount);
47469+ atomic_inc_unchecked(&totSmBufAllocCount);
47470 #endif /* CONFIG_CIFS_STATS2 */
47471
47472 }
47473diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47474index a5bf577..6d19845 100644
47475--- a/fs/coda/cache.c
47476+++ b/fs/coda/cache.c
47477@@ -24,14 +24,14 @@
47478 #include <linux/coda_fs_i.h>
47479 #include <linux/coda_cache.h>
47480
47481-static atomic_t permission_epoch = ATOMIC_INIT(0);
47482+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47483
47484 /* replace or extend an acl cache hit */
47485 void coda_cache_enter(struct inode *inode, int mask)
47486 {
47487 struct coda_inode_info *cii = ITOC(inode);
47488
47489- cii->c_cached_epoch = atomic_read(&permission_epoch);
47490+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47491 if (cii->c_uid != current_fsuid()) {
47492 cii->c_uid = current_fsuid();
47493 cii->c_cached_perm = mask;
47494@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
47495 void coda_cache_clear_inode(struct inode *inode)
47496 {
47497 struct coda_inode_info *cii = ITOC(inode);
47498- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47499+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47500 }
47501
47502 /* remove all acl caches */
47503 void coda_cache_clear_all(struct super_block *sb)
47504 {
47505- atomic_inc(&permission_epoch);
47506+ atomic_inc_unchecked(&permission_epoch);
47507 }
47508
47509
47510@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
47511
47512 hit = (mask & cii->c_cached_perm) == mask &&
47513 cii->c_uid == current_fsuid() &&
47514- cii->c_cached_epoch == atomic_read(&permission_epoch);
47515+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47516
47517 return hit;
47518 }
47519diff --git a/fs/compat.c b/fs/compat.c
47520index d1e2411..27064e4 100644
47521--- a/fs/compat.c
47522+++ b/fs/compat.c
47523@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
47524 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
47525 {
47526 compat_ino_t ino = stat->ino;
47527- typeof(ubuf->st_uid) uid = 0;
47528- typeof(ubuf->st_gid) gid = 0;
47529+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
47530+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
47531 int err;
47532
47533 SET_UID(uid, stat->uid);
47534@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47535
47536 set_fs(KERNEL_DS);
47537 /* The __user pointer cast is valid because of the set_fs() */
47538- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47539+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47540 set_fs(oldfs);
47541 /* truncating is ok because it's a user address */
47542 if (!ret)
47543@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
47544
47545 struct compat_readdir_callback {
47546 struct compat_old_linux_dirent __user *dirent;
47547+ struct file * file;
47548 int result;
47549 };
47550
47551@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47552 buf->result = -EOVERFLOW;
47553 return -EOVERFLOW;
47554 }
47555+
47556+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47557+ return 0;
47558+
47559 buf->result++;
47560 dirent = buf->dirent;
47561 if (!access_ok(VERIFY_WRITE, dirent,
47562@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47563
47564 buf.result = 0;
47565 buf.dirent = dirent;
47566+ buf.file = file;
47567
47568 error = vfs_readdir(file, compat_fillonedir, &buf);
47569 if (buf.result)
47570@@ -899,6 +905,7 @@ struct compat_linux_dirent {
47571 struct compat_getdents_callback {
47572 struct compat_linux_dirent __user *current_dir;
47573 struct compat_linux_dirent __user *previous;
47574+ struct file * file;
47575 int count;
47576 int error;
47577 };
47578@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47579 buf->error = -EOVERFLOW;
47580 return -EOVERFLOW;
47581 }
47582+
47583+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47584+ return 0;
47585+
47586 dirent = buf->previous;
47587 if (dirent) {
47588 if (__put_user(offset, &dirent->d_off))
47589@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47590 buf.previous = NULL;
47591 buf.count = count;
47592 buf.error = 0;
47593+ buf.file = file;
47594
47595 error = vfs_readdir(file, compat_filldir, &buf);
47596 if (error >= 0)
47597@@ -987,6 +999,7 @@ out:
47598 struct compat_getdents_callback64 {
47599 struct linux_dirent64 __user *current_dir;
47600 struct linux_dirent64 __user *previous;
47601+ struct file * file;
47602 int count;
47603 int error;
47604 };
47605@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
47606 buf->error = -EINVAL; /* only used if we fail.. */
47607 if (reclen > buf->count)
47608 return -EINVAL;
47609+
47610+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47611+ return 0;
47612+
47613 dirent = buf->previous;
47614
47615 if (dirent) {
47616@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
47617 buf.previous = NULL;
47618 buf.count = count;
47619 buf.error = 0;
47620+ buf.file = file;
47621
47622 error = vfs_readdir(file, compat_filldir64, &buf);
47623 if (error >= 0)
47624 error = buf.error;
47625 lastdirent = buf.previous;
47626 if (lastdirent) {
47627- typeof(lastdirent->d_off) d_off = file->f_pos;
47628+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47629 if (__put_user_unaligned(d_off, &lastdirent->d_off))
47630 error = -EFAULT;
47631 else
47632@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
47633 * verify all the pointers
47634 */
47635 ret = -EINVAL;
47636- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
47637+ if (nr_segs > UIO_MAXIOV)
47638 goto out;
47639 if (!file->f_op)
47640 goto out;
47641@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
47642 compat_uptr_t __user *envp,
47643 struct pt_regs * regs)
47644 {
47645+#ifdef CONFIG_GRKERNSEC
47646+ struct file *old_exec_file;
47647+ struct acl_subject_label *old_acl;
47648+ struct rlimit old_rlim[RLIM_NLIMITS];
47649+#endif
47650 struct linux_binprm *bprm;
47651 struct file *file;
47652 struct files_struct *displaced;
47653 bool clear_in_exec;
47654 int retval;
47655+ const struct cred *cred = current_cred();
47656+
47657+ /*
47658+ * We move the actual failure in case of RLIMIT_NPROC excess from
47659+ * set*uid() to execve() because too many poorly written programs
47660+ * don't check setuid() return code. Here we additionally recheck
47661+ * whether NPROC limit is still exceeded.
47662+ */
47663+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
47664+
47665+ if ((current->flags & PF_NPROC_EXCEEDED) &&
47666+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
47667+ retval = -EAGAIN;
47668+ goto out_ret;
47669+ }
47670+
47671+ /* We're below the limit (still or again), so we don't want to make
47672+ * further execve() calls fail. */
47673+ current->flags &= ~PF_NPROC_EXCEEDED;
47674
47675 retval = unshare_files(&displaced);
47676 if (retval)
47677@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
47678 bprm->filename = filename;
47679 bprm->interp = filename;
47680
47681+ if (gr_process_user_ban()) {
47682+ retval = -EPERM;
47683+ goto out_file;
47684+ }
47685+
47686+ retval = -EACCES;
47687+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
47688+ goto out_file;
47689+
47690 retval = bprm_mm_init(bprm);
47691 if (retval)
47692 goto out_file;
47693@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
47694 if (retval < 0)
47695 goto out;
47696
47697+ if (!gr_tpe_allow(file)) {
47698+ retval = -EACCES;
47699+ goto out;
47700+ }
47701+
47702+ if (gr_check_crash_exec(file)) {
47703+ retval = -EACCES;
47704+ goto out;
47705+ }
47706+
47707+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
47708+
47709+ gr_handle_exec_args_compat(bprm, argv);
47710+
47711+#ifdef CONFIG_GRKERNSEC
47712+ old_acl = current->acl;
47713+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
47714+ old_exec_file = current->exec_file;
47715+ get_file(file);
47716+ current->exec_file = file;
47717+#endif
47718+
47719+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
47720+ bprm->unsafe & LSM_UNSAFE_SHARE);
47721+ if (retval < 0)
47722+ goto out_fail;
47723+
47724 retval = search_binary_handler(bprm, regs);
47725 if (retval < 0)
47726- goto out;
47727+ goto out_fail;
47728+#ifdef CONFIG_GRKERNSEC
47729+ if (old_exec_file)
47730+ fput(old_exec_file);
47731+#endif
47732
47733 /* execve succeeded */
47734 current->fs->in_exec = 0;
47735@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
47736 put_files_struct(displaced);
47737 return retval;
47738
47739+out_fail:
47740+#ifdef CONFIG_GRKERNSEC
47741+ current->acl = old_acl;
47742+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
47743+ fput(current->exec_file);
47744+ current->exec_file = old_exec_file;
47745+#endif
47746+
47747 out:
47748 if (bprm->mm) {
47749 acct_arg_size(bprm, 0);
47750@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
47751 struct fdtable *fdt;
47752 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
47753
47754+ pax_track_stack();
47755+
47756 if (n < 0)
47757 goto out_nofds;
47758
47759@@ -2151,7 +2243,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
47760 oldfs = get_fs();
47761 set_fs(KERNEL_DS);
47762 /* The __user pointer casts are valid because of the set_fs() */
47763- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
47764+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
47765 set_fs(oldfs);
47766
47767 if (err)
47768diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
47769index 0adced2..bbb1b0d 100644
47770--- a/fs/compat_binfmt_elf.c
47771+++ b/fs/compat_binfmt_elf.c
47772@@ -29,10 +29,12 @@
47773 #undef elfhdr
47774 #undef elf_phdr
47775 #undef elf_note
47776+#undef elf_dyn
47777 #undef elf_addr_t
47778 #define elfhdr elf32_hdr
47779 #define elf_phdr elf32_phdr
47780 #define elf_note elf32_note
47781+#define elf_dyn Elf32_Dyn
47782 #define elf_addr_t Elf32_Addr
47783
47784 /*
47785diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
47786index d84e705..d8c364c 100644
47787--- a/fs/compat_ioctl.c
47788+++ b/fs/compat_ioctl.c
47789@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
47790 up = (struct compat_video_spu_palette __user *) arg;
47791 err = get_user(palp, &up->palette);
47792 err |= get_user(length, &up->length);
47793+ if (err)
47794+ return -EFAULT;
47795
47796 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
47797 err = put_user(compat_ptr(palp), &up_native->palette);
47798@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
47799 return -EFAULT;
47800 if (__get_user(udata, &ss32->iomem_base))
47801 return -EFAULT;
47802- ss.iomem_base = compat_ptr(udata);
47803+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
47804 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
47805 __get_user(ss.port_high, &ss32->port_high))
47806 return -EFAULT;
47807@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
47808 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
47809 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
47810 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
47811- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47812+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47813 return -EFAULT;
47814
47815 return ioctl_preallocate(file, p);
47816diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
47817index 8e48b52..f01ed91 100644
47818--- a/fs/configfs/dir.c
47819+++ b/fs/configfs/dir.c
47820@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47821 }
47822 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
47823 struct configfs_dirent *next;
47824- const char * name;
47825+ const unsigned char * name;
47826+ char d_name[sizeof(next->s_dentry->d_iname)];
47827 int len;
47828
47829 next = list_entry(p, struct configfs_dirent,
47830@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47831 continue;
47832
47833 name = configfs_get_name(next);
47834- len = strlen(name);
47835+ if (next->s_dentry && name == next->s_dentry->d_iname) {
47836+ len = next->s_dentry->d_name.len;
47837+ memcpy(d_name, name, len);
47838+ name = d_name;
47839+ } else
47840+ len = strlen(name);
47841 if (next->s_dentry)
47842 ino = next->s_dentry->d_inode->i_ino;
47843 else
47844diff --git a/fs/dcache.c b/fs/dcache.c
47845index 44c0aea..2529092 100644
47846--- a/fs/dcache.c
47847+++ b/fs/dcache.c
47848@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
47849
47850 static struct kmem_cache *dentry_cache __read_mostly;
47851
47852-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
47853-
47854 /*
47855 * This is the single most critical data structure when it comes
47856 * to the dcache: the hashtable for lookups. Somebody should try
47857@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
47858 mempages -= reserve;
47859
47860 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
47861- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
47862+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
47863
47864 dcache_init();
47865 inode_init();
47866diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
47867index c010ecf..a8d8c59 100644
47868--- a/fs/dlm/lockspace.c
47869+++ b/fs/dlm/lockspace.c
47870@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
47871 kfree(ls);
47872 }
47873
47874-static struct sysfs_ops dlm_attr_ops = {
47875+static const struct sysfs_ops dlm_attr_ops = {
47876 .show = dlm_attr_show,
47877 .store = dlm_attr_store,
47878 };
47879diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
47880index 88ba4d4..073f003 100644
47881--- a/fs/ecryptfs/inode.c
47882+++ b/fs/ecryptfs/inode.c
47883@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
47884 old_fs = get_fs();
47885 set_fs(get_ds());
47886 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
47887- (char __user *)lower_buf,
47888+ (char __force_user *)lower_buf,
47889 lower_bufsiz);
47890 set_fs(old_fs);
47891 if (rc < 0)
47892@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47893 }
47894 old_fs = get_fs();
47895 set_fs(get_ds());
47896- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
47897+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
47898 set_fs(old_fs);
47899 if (rc < 0)
47900 goto out_free;
47901diff --git a/fs/exec.c b/fs/exec.c
47902index 86fafc6..b307bfa 100644
47903--- a/fs/exec.c
47904+++ b/fs/exec.c
47905@@ -56,12 +56,24 @@
47906 #include <linux/fsnotify.h>
47907 #include <linux/fs_struct.h>
47908 #include <linux/pipe_fs_i.h>
47909+#include <linux/random.h>
47910+#include <linux/seq_file.h>
47911+
47912+#ifdef CONFIG_PAX_REFCOUNT
47913+#include <linux/kallsyms.h>
47914+#include <linux/kdebug.h>
47915+#endif
47916
47917 #include <asm/uaccess.h>
47918 #include <asm/mmu_context.h>
47919 #include <asm/tlb.h>
47920 #include "internal.h"
47921
47922+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
47923+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
47924+EXPORT_SYMBOL(pax_set_initial_flags_func);
47925+#endif
47926+
47927 int core_uses_pid;
47928 char core_pattern[CORENAME_MAX_SIZE] = "core";
47929 unsigned int core_pipe_limit;
47930@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
47931 int write)
47932 {
47933 struct page *page;
47934- int ret;
47935
47936-#ifdef CONFIG_STACK_GROWSUP
47937- if (write) {
47938- ret = expand_stack_downwards(bprm->vma, pos);
47939- if (ret < 0)
47940- return NULL;
47941- }
47942-#endif
47943- ret = get_user_pages(current, bprm->mm, pos,
47944- 1, write, 1, &page, NULL);
47945- if (ret <= 0)
47946+ if (0 > expand_stack_downwards(bprm->vma, pos))
47947+ return NULL;
47948+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
47949 return NULL;
47950
47951 if (write) {
47952@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
47953 vma->vm_end = STACK_TOP_MAX;
47954 vma->vm_start = vma->vm_end - PAGE_SIZE;
47955 vma->vm_flags = VM_STACK_FLAGS;
47956+
47957+#ifdef CONFIG_PAX_SEGMEXEC
47958+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
47959+#endif
47960+
47961 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
47962
47963 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
47964@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
47965 mm->stack_vm = mm->total_vm = 1;
47966 up_write(&mm->mmap_sem);
47967 bprm->p = vma->vm_end - sizeof(void *);
47968+
47969+#ifdef CONFIG_PAX_RANDUSTACK
47970+ if (randomize_va_space)
47971+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
47972+#endif
47973+
47974 return 0;
47975 err:
47976 up_write(&mm->mmap_sem);
47977@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
47978 int r;
47979 mm_segment_t oldfs = get_fs();
47980 set_fs(KERNEL_DS);
47981- r = copy_strings(argc, (char __user * __user *)argv, bprm);
47982+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
47983 set_fs(oldfs);
47984 return r;
47985 }
47986@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
47987 unsigned long new_end = old_end - shift;
47988 struct mmu_gather *tlb;
47989
47990- BUG_ON(new_start > new_end);
47991+ if (new_start >= new_end || new_start < mmap_min_addr)
47992+ return -ENOMEM;
47993
47994 /*
47995 * ensure there are no vmas between where we want to go
47996@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
47997 if (vma != find_vma(mm, new_start))
47998 return -EFAULT;
47999
48000+#ifdef CONFIG_PAX_SEGMEXEC
48001+ BUG_ON(pax_find_mirror_vma(vma));
48002+#endif
48003+
48004 /*
48005 * cover the whole range: [new_start, old_end)
48006 */
48007@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48008 stack_top = arch_align_stack(stack_top);
48009 stack_top = PAGE_ALIGN(stack_top);
48010
48011- if (unlikely(stack_top < mmap_min_addr) ||
48012- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48013- return -ENOMEM;
48014-
48015 stack_shift = vma->vm_end - stack_top;
48016
48017 bprm->p -= stack_shift;
48018@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48019 bprm->exec -= stack_shift;
48020
48021 down_write(&mm->mmap_sem);
48022+
48023+ /* Move stack pages down in memory. */
48024+ if (stack_shift) {
48025+ ret = shift_arg_pages(vma, stack_shift);
48026+ if (ret)
48027+ goto out_unlock;
48028+ }
48029+
48030 vm_flags = VM_STACK_FLAGS;
48031
48032 /*
48033@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48034 vm_flags &= ~VM_EXEC;
48035 vm_flags |= mm->def_flags;
48036
48037+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48038+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48039+ vm_flags &= ~VM_EXEC;
48040+
48041+#ifdef CONFIG_PAX_MPROTECT
48042+ if (mm->pax_flags & MF_PAX_MPROTECT)
48043+ vm_flags &= ~VM_MAYEXEC;
48044+#endif
48045+
48046+ }
48047+#endif
48048+
48049 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48050 vm_flags);
48051 if (ret)
48052 goto out_unlock;
48053 BUG_ON(prev != vma);
48054
48055- /* Move stack pages down in memory. */
48056- if (stack_shift) {
48057- ret = shift_arg_pages(vma, stack_shift);
48058- if (ret)
48059- goto out_unlock;
48060- }
48061-
48062 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48063 stack_size = vma->vm_end - vma->vm_start;
48064 /*
48065@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_t offset,
48066 old_fs = get_fs();
48067 set_fs(get_ds());
48068 /* The cast to a user pointer is valid due to the set_fs() */
48069- result = vfs_read(file, (void __user *)addr, count, &pos);
48070+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
48071 set_fs(old_fs);
48072 return result;
48073 }
48074@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48075 }
48076 rcu_read_unlock();
48077
48078- if (p->fs->users > n_fs) {
48079+ if (atomic_read(&p->fs->users) > n_fs) {
48080 bprm->unsafe |= LSM_UNSAFE_SHARE;
48081 } else {
48082 res = -EAGAIN;
48083@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
48084 char __user *__user *envp,
48085 struct pt_regs * regs)
48086 {
48087+#ifdef CONFIG_GRKERNSEC
48088+ struct file *old_exec_file;
48089+ struct acl_subject_label *old_acl;
48090+ struct rlimit old_rlim[RLIM_NLIMITS];
48091+#endif
48092 struct linux_binprm *bprm;
48093 struct file *file;
48094 struct files_struct *displaced;
48095 bool clear_in_exec;
48096 int retval;
48097+ const struct cred *cred = current_cred();
48098+
48099+ /*
48100+ * We move the actual failure in case of RLIMIT_NPROC excess from
48101+ * set*uid() to execve() because too many poorly written programs
48102+ * don't check setuid() return code. Here we additionally recheck
48103+ * whether NPROC limit is still exceeded.
48104+ */
48105+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48106+
48107+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48108+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48109+ retval = -EAGAIN;
48110+ goto out_ret;
48111+ }
48112+
48113+ /* We're below the limit (still or again), so we don't want to make
48114+ * further execve() calls fail. */
48115+ current->flags &= ~PF_NPROC_EXCEEDED;
48116
48117 retval = unshare_files(&displaced);
48118 if (retval)
48119@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
48120 bprm->filename = filename;
48121 bprm->interp = filename;
48122
48123+ if (gr_process_user_ban()) {
48124+ retval = -EPERM;
48125+ goto out_file;
48126+ }
48127+
48128+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48129+ retval = -EACCES;
48130+ goto out_file;
48131+ }
48132+
48133 retval = bprm_mm_init(bprm);
48134 if (retval)
48135 goto out_file;
48136@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
48137 if (retval < 0)
48138 goto out;
48139
48140+ if (!gr_tpe_allow(file)) {
48141+ retval = -EACCES;
48142+ goto out;
48143+ }
48144+
48145+ if (gr_check_crash_exec(file)) {
48146+ retval = -EACCES;
48147+ goto out;
48148+ }
48149+
48150+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48151+
48152+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48153+
48154+#ifdef CONFIG_GRKERNSEC
48155+ old_acl = current->acl;
48156+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48157+ old_exec_file = current->exec_file;
48158+ get_file(file);
48159+ current->exec_file = file;
48160+#endif
48161+
48162+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48163+ bprm->unsafe & LSM_UNSAFE_SHARE);
48164+ if (retval < 0)
48165+ goto out_fail;
48166+
48167 current->flags &= ~PF_KTHREAD;
48168 retval = search_binary_handler(bprm,regs);
48169 if (retval < 0)
48170- goto out;
48171+ goto out_fail;
48172+#ifdef CONFIG_GRKERNSEC
48173+ if (old_exec_file)
48174+ fput(old_exec_file);
48175+#endif
48176
48177 /* execve succeeded */
48178 current->fs->in_exec = 0;
48179@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
48180 put_files_struct(displaced);
48181 return retval;
48182
48183+out_fail:
48184+#ifdef CONFIG_GRKERNSEC
48185+ current->acl = old_acl;
48186+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48187+ fput(current->exec_file);
48188+ current->exec_file = old_exec_file;
48189+#endif
48190+
48191 out:
48192 if (bprm->mm) {
48193 acct_arg_size(bprm, 0);
48194@@ -1591,6 +1693,220 @@ out:
48195 return ispipe;
48196 }
48197
48198+int pax_check_flags(unsigned long *flags)
48199+{
48200+ int retval = 0;
48201+
48202+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48203+ if (*flags & MF_PAX_SEGMEXEC)
48204+ {
48205+ *flags &= ~MF_PAX_SEGMEXEC;
48206+ retval = -EINVAL;
48207+ }
48208+#endif
48209+
48210+ if ((*flags & MF_PAX_PAGEEXEC)
48211+
48212+#ifdef CONFIG_PAX_PAGEEXEC
48213+ && (*flags & MF_PAX_SEGMEXEC)
48214+#endif
48215+
48216+ )
48217+ {
48218+ *flags &= ~MF_PAX_PAGEEXEC;
48219+ retval = -EINVAL;
48220+ }
48221+
48222+ if ((*flags & MF_PAX_MPROTECT)
48223+
48224+#ifdef CONFIG_PAX_MPROTECT
48225+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48226+#endif
48227+
48228+ )
48229+ {
48230+ *flags &= ~MF_PAX_MPROTECT;
48231+ retval = -EINVAL;
48232+ }
48233+
48234+ if ((*flags & MF_PAX_EMUTRAMP)
48235+
48236+#ifdef CONFIG_PAX_EMUTRAMP
48237+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48238+#endif
48239+
48240+ )
48241+ {
48242+ *flags &= ~MF_PAX_EMUTRAMP;
48243+ retval = -EINVAL;
48244+ }
48245+
48246+ return retval;
48247+}
48248+
48249+EXPORT_SYMBOL(pax_check_flags);
48250+
48251+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48252+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48253+{
48254+ struct task_struct *tsk = current;
48255+ struct mm_struct *mm = current->mm;
48256+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48257+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48258+ char *path_exec = NULL;
48259+ char *path_fault = NULL;
48260+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
48261+
48262+ if (buffer_exec && buffer_fault) {
48263+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48264+
48265+ down_read(&mm->mmap_sem);
48266+ vma = mm->mmap;
48267+ while (vma && (!vma_exec || !vma_fault)) {
48268+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48269+ vma_exec = vma;
48270+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48271+ vma_fault = vma;
48272+ vma = vma->vm_next;
48273+ }
48274+ if (vma_exec) {
48275+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48276+ if (IS_ERR(path_exec))
48277+ path_exec = "<path too long>";
48278+ else {
48279+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48280+ if (path_exec) {
48281+ *path_exec = 0;
48282+ path_exec = buffer_exec;
48283+ } else
48284+ path_exec = "<path too long>";
48285+ }
48286+ }
48287+ if (vma_fault) {
48288+ start = vma_fault->vm_start;
48289+ end = vma_fault->vm_end;
48290+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48291+ if (vma_fault->vm_file) {
48292+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48293+ if (IS_ERR(path_fault))
48294+ path_fault = "<path too long>";
48295+ else {
48296+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48297+ if (path_fault) {
48298+ *path_fault = 0;
48299+ path_fault = buffer_fault;
48300+ } else
48301+ path_fault = "<path too long>";
48302+ }
48303+ } else
48304+ path_fault = "<anonymous mapping>";
48305+ }
48306+ up_read(&mm->mmap_sem);
48307+ }
48308+ if (tsk->signal->curr_ip)
48309+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48310+ else
48311+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48312+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48313+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48314+ task_uid(tsk), task_euid(tsk), pc, sp);
48315+ free_page((unsigned long)buffer_exec);
48316+ free_page((unsigned long)buffer_fault);
48317+ pax_report_insns(regs, pc, sp);
48318+ do_coredump(SIGKILL, SIGKILL, regs);
48319+}
48320+#endif
48321+
48322+#ifdef CONFIG_PAX_REFCOUNT
48323+void pax_report_refcount_overflow(struct pt_regs *regs)
48324+{
48325+ if (current->signal->curr_ip)
48326+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48327+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48328+ else
48329+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48330+ current->comm, task_pid_nr(current), current_uid(), current_euid());
48331+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48332+ show_regs(regs);
48333+ force_sig_specific(SIGKILL, current);
48334+}
48335+#endif
48336+
48337+#ifdef CONFIG_PAX_USERCOPY
48338+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48339+int object_is_on_stack(const void *obj, unsigned long len)
48340+{
48341+ const void * const stack = task_stack_page(current);
48342+ const void * const stackend = stack + THREAD_SIZE;
48343+
48344+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48345+ const void *frame = NULL;
48346+ const void *oldframe;
48347+#endif
48348+
48349+ if (obj + len < obj)
48350+ return -1;
48351+
48352+ if (obj + len <= stack || stackend <= obj)
48353+ return 0;
48354+
48355+ if (obj < stack || stackend < obj + len)
48356+ return -1;
48357+
48358+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48359+ oldframe = __builtin_frame_address(1);
48360+ if (oldframe)
48361+ frame = __builtin_frame_address(2);
48362+ /*
48363+ low ----------------------------------------------> high
48364+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
48365+ ^----------------^
48366+ allow copies only within here
48367+ */
48368+ while (stack <= frame && frame < stackend) {
48369+ /* if obj + len extends past the last frame, this
48370+ check won't pass and the next frame will be 0,
48371+ causing us to bail out and correctly report
48372+ the copy as invalid
48373+ */
48374+ if (obj + len <= frame)
48375+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48376+ oldframe = frame;
48377+ frame = *(const void * const *)frame;
48378+ }
48379+ return -1;
48380+#else
48381+ return 1;
48382+#endif
48383+}
48384+
48385+
48386+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48387+{
48388+ if (current->signal->curr_ip)
48389+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48390+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48391+ else
48392+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48393+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48394+
48395+ dump_stack();
48396+ gr_handle_kernel_exploit();
48397+ do_group_exit(SIGKILL);
48398+}
48399+#endif
48400+
48401+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48402+void pax_track_stack(void)
48403+{
48404+ unsigned long sp = (unsigned long)&sp;
48405+ if (sp < current_thread_info()->lowest_stack &&
48406+ sp > (unsigned long)task_stack_page(current))
48407+ current_thread_info()->lowest_stack = sp;
48408+}
48409+EXPORT_SYMBOL(pax_track_stack);
48410+#endif
48411+
48412 static int zap_process(struct task_struct *start)
48413 {
48414 struct task_struct *t;
48415@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct file *file)
48416 pipe = file->f_path.dentry->d_inode->i_pipe;
48417
48418 pipe_lock(pipe);
48419- pipe->readers++;
48420- pipe->writers--;
48421+ atomic_inc(&pipe->readers);
48422+ atomic_dec(&pipe->writers);
48423
48424- while ((pipe->readers > 1) && (!signal_pending(current))) {
48425+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
48426 wake_up_interruptible_sync(&pipe->wait);
48427 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48428 pipe_wait(pipe);
48429 }
48430
48431- pipe->readers--;
48432- pipe->writers++;
48433+ atomic_dec(&pipe->readers);
48434+ atomic_inc(&pipe->writers);
48435 pipe_unlock(pipe);
48436
48437 }
48438@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48439 char **helper_argv = NULL;
48440 int helper_argc = 0;
48441 int dump_count = 0;
48442- static atomic_t core_dump_count = ATOMIC_INIT(0);
48443+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
48444
48445 audit_core_dumps(signr);
48446
48447+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
48448+ gr_handle_brute_attach(current, mm->flags);
48449+
48450 binfmt = mm->binfmt;
48451 if (!binfmt || !binfmt->core_dump)
48452 goto fail;
48453@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48454 */
48455 clear_thread_flag(TIF_SIGPENDING);
48456
48457+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
48458+
48459 /*
48460 * lock_kernel() because format_corename() is controlled by sysctl, which
48461 * uses lock_kernel()
48462@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48463 goto fail_unlock;
48464 }
48465
48466- dump_count = atomic_inc_return(&core_dump_count);
48467+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
48468 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
48469 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
48470 task_tgid_vnr(current), current->comm);
48471@@ -1972,7 +2293,7 @@ close_fail:
48472 filp_close(file, NULL);
48473 fail_dropcount:
48474 if (dump_count)
48475- atomic_dec(&core_dump_count);
48476+ atomic_dec_unchecked(&core_dump_count);
48477 fail_unlock:
48478 if (helper_argv)
48479 argv_free(helper_argv);
48480diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
48481index 7f8d2e5..a1abdbb 100644
48482--- a/fs/ext2/balloc.c
48483+++ b/fs/ext2/balloc.c
48484@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
48485
48486 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48487 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48488- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48489+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48490 sbi->s_resuid != current_fsuid() &&
48491 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48492 return 0;
48493diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
48494index 27967f9..9f2a5fb 100644
48495--- a/fs/ext3/balloc.c
48496+++ b/fs/ext3/balloc.c
48497@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
48498
48499 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48500 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48501- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48502+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48503 sbi->s_resuid != current_fsuid() &&
48504 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48505 return 0;
48506diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
48507index e85b63c..80398e6 100644
48508--- a/fs/ext4/balloc.c
48509+++ b/fs/ext4/balloc.c
48510@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
48511 /* Hm, nope. Are (enough) root reserved blocks available? */
48512 if (sbi->s_resuid == current_fsuid() ||
48513 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
48514- capable(CAP_SYS_RESOURCE)) {
48515+ capable_nolog(CAP_SYS_RESOURCE)) {
48516 if (free_blocks >= (nblocks + dirty_blocks))
48517 return 1;
48518 }
48519diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
48520index 67c46ed..1f237e5 100644
48521--- a/fs/ext4/ext4.h
48522+++ b/fs/ext4/ext4.h
48523@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
48524
48525 /* stats for buddy allocator */
48526 spinlock_t s_mb_pa_lock;
48527- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
48528- atomic_t s_bal_success; /* we found long enough chunks */
48529- atomic_t s_bal_allocated; /* in blocks */
48530- atomic_t s_bal_ex_scanned; /* total extents scanned */
48531- atomic_t s_bal_goals; /* goal hits */
48532- atomic_t s_bal_breaks; /* too long searches */
48533- atomic_t s_bal_2orders; /* 2^order hits */
48534+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
48535+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
48536+ atomic_unchecked_t s_bal_allocated; /* in blocks */
48537+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
48538+ atomic_unchecked_t s_bal_goals; /* goal hits */
48539+ atomic_unchecked_t s_bal_breaks; /* too long searches */
48540+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
48541 spinlock_t s_bal_lock;
48542 unsigned long s_mb_buddies_generated;
48543 unsigned long long s_mb_generation_time;
48544- atomic_t s_mb_lost_chunks;
48545- atomic_t s_mb_preallocated;
48546- atomic_t s_mb_discarded;
48547+ atomic_unchecked_t s_mb_lost_chunks;
48548+ atomic_unchecked_t s_mb_preallocated;
48549+ atomic_unchecked_t s_mb_discarded;
48550 atomic_t s_lock_busy;
48551
48552 /* locality groups */
48553diff --git a/fs/ext4/file.c b/fs/ext4/file.c
48554index 2a60541..7439d61 100644
48555--- a/fs/ext4/file.c
48556+++ b/fs/ext4/file.c
48557@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
48558 cp = d_path(&path, buf, sizeof(buf));
48559 path_put(&path);
48560 if (!IS_ERR(cp)) {
48561- memcpy(sbi->s_es->s_last_mounted, cp,
48562- sizeof(sbi->s_es->s_last_mounted));
48563+ strlcpy(sbi->s_es->s_last_mounted, cp,
48564+ sizeof(sbi->s_es->s_last_mounted));
48565 sb->s_dirt = 1;
48566 }
48567 }
48568diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
48569index 42bac1b..0aab9d8 100644
48570--- a/fs/ext4/mballoc.c
48571+++ b/fs/ext4/mballoc.c
48572@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
48573 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
48574
48575 if (EXT4_SB(sb)->s_mb_stats)
48576- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
48577+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
48578
48579 break;
48580 }
48581@@ -2131,7 +2131,7 @@ repeat:
48582 ac->ac_status = AC_STATUS_CONTINUE;
48583 ac->ac_flags |= EXT4_MB_HINT_FIRST;
48584 cr = 3;
48585- atomic_inc(&sbi->s_mb_lost_chunks);
48586+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
48587 goto repeat;
48588 }
48589 }
48590@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
48591 ext4_grpblk_t counters[16];
48592 } sg;
48593
48594+ pax_track_stack();
48595+
48596 group--;
48597 if (group == 0)
48598 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
48599@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
48600 if (sbi->s_mb_stats) {
48601 printk(KERN_INFO
48602 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
48603- atomic_read(&sbi->s_bal_allocated),
48604- atomic_read(&sbi->s_bal_reqs),
48605- atomic_read(&sbi->s_bal_success));
48606+ atomic_read_unchecked(&sbi->s_bal_allocated),
48607+ atomic_read_unchecked(&sbi->s_bal_reqs),
48608+ atomic_read_unchecked(&sbi->s_bal_success));
48609 printk(KERN_INFO
48610 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
48611 "%u 2^N hits, %u breaks, %u lost\n",
48612- atomic_read(&sbi->s_bal_ex_scanned),
48613- atomic_read(&sbi->s_bal_goals),
48614- atomic_read(&sbi->s_bal_2orders),
48615- atomic_read(&sbi->s_bal_breaks),
48616- atomic_read(&sbi->s_mb_lost_chunks));
48617+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
48618+ atomic_read_unchecked(&sbi->s_bal_goals),
48619+ atomic_read_unchecked(&sbi->s_bal_2orders),
48620+ atomic_read_unchecked(&sbi->s_bal_breaks),
48621+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
48622 printk(KERN_INFO
48623 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
48624 sbi->s_mb_buddies_generated++,
48625 sbi->s_mb_generation_time);
48626 printk(KERN_INFO
48627 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
48628- atomic_read(&sbi->s_mb_preallocated),
48629- atomic_read(&sbi->s_mb_discarded));
48630+ atomic_read_unchecked(&sbi->s_mb_preallocated),
48631+ atomic_read_unchecked(&sbi->s_mb_discarded));
48632 }
48633
48634 free_percpu(sbi->s_locality_groups);
48635@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
48636 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
48637
48638 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
48639- atomic_inc(&sbi->s_bal_reqs);
48640- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48641+ atomic_inc_unchecked(&sbi->s_bal_reqs);
48642+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48643 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
48644- atomic_inc(&sbi->s_bal_success);
48645- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
48646+ atomic_inc_unchecked(&sbi->s_bal_success);
48647+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
48648 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
48649 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
48650- atomic_inc(&sbi->s_bal_goals);
48651+ atomic_inc_unchecked(&sbi->s_bal_goals);
48652 if (ac->ac_found > sbi->s_mb_max_to_scan)
48653- atomic_inc(&sbi->s_bal_breaks);
48654+ atomic_inc_unchecked(&sbi->s_bal_breaks);
48655 }
48656
48657 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
48658@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
48659 trace_ext4_mb_new_inode_pa(ac, pa);
48660
48661 ext4_mb_use_inode_pa(ac, pa);
48662- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48663+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48664
48665 ei = EXT4_I(ac->ac_inode);
48666 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48667@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
48668 trace_ext4_mb_new_group_pa(ac, pa);
48669
48670 ext4_mb_use_group_pa(ac, pa);
48671- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48672+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48673
48674 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48675 lg = ac->ac_lg;
48676@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
48677 * from the bitmap and continue.
48678 */
48679 }
48680- atomic_add(free, &sbi->s_mb_discarded);
48681+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
48682
48683 return err;
48684 }
48685@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
48686 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
48687 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
48688 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
48689- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48690+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48691
48692 if (ac) {
48693 ac->ac_sb = sb;
48694diff --git a/fs/ext4/super.c b/fs/ext4/super.c
48695index f27e045..be5a1c3 100644
48696--- a/fs/ext4/super.c
48697+++ b/fs/ext4/super.c
48698@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobject *kobj)
48699 }
48700
48701
48702-static struct sysfs_ops ext4_attr_ops = {
48703+static const struct sysfs_ops ext4_attr_ops = {
48704 .show = ext4_attr_show,
48705 .store = ext4_attr_store,
48706 };
48707diff --git a/fs/fcntl.c b/fs/fcntl.c
48708index 97e01dc..e9aab2d 100644
48709--- a/fs/fcntl.c
48710+++ b/fs/fcntl.c
48711@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
48712 if (err)
48713 return err;
48714
48715+ if (gr_handle_chroot_fowner(pid, type))
48716+ return -ENOENT;
48717+ if (gr_check_protected_task_fowner(pid, type))
48718+ return -EACCES;
48719+
48720 f_modown(filp, pid, type, force);
48721 return 0;
48722 }
48723@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
48724
48725 static int f_setown_ex(struct file *filp, unsigned long arg)
48726 {
48727- struct f_owner_ex * __user owner_p = (void * __user)arg;
48728+ struct f_owner_ex __user *owner_p = (void __user *)arg;
48729 struct f_owner_ex owner;
48730 struct pid *pid;
48731 int type;
48732@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
48733
48734 static int f_getown_ex(struct file *filp, unsigned long arg)
48735 {
48736- struct f_owner_ex * __user owner_p = (void * __user)arg;
48737+ struct f_owner_ex __user *owner_p = (void __user *)arg;
48738 struct f_owner_ex owner;
48739 int ret = 0;
48740
48741@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
48742 switch (cmd) {
48743 case F_DUPFD:
48744 case F_DUPFD_CLOEXEC:
48745+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
48746 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
48747 break;
48748 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
48749diff --git a/fs/fifo.c b/fs/fifo.c
48750index f8f97b8..b1f2259 100644
48751--- a/fs/fifo.c
48752+++ b/fs/fifo.c
48753@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
48754 */
48755 filp->f_op = &read_pipefifo_fops;
48756 pipe->r_counter++;
48757- if (pipe->readers++ == 0)
48758+ if (atomic_inc_return(&pipe->readers) == 1)
48759 wake_up_partner(inode);
48760
48761- if (!pipe->writers) {
48762+ if (!atomic_read(&pipe->writers)) {
48763 if ((filp->f_flags & O_NONBLOCK)) {
48764 /* suppress POLLHUP until we have
48765 * seen a writer */
48766@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
48767 * errno=ENXIO when there is no process reading the FIFO.
48768 */
48769 ret = -ENXIO;
48770- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
48771+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
48772 goto err;
48773
48774 filp->f_op = &write_pipefifo_fops;
48775 pipe->w_counter++;
48776- if (!pipe->writers++)
48777+ if (atomic_inc_return(&pipe->writers) == 1)
48778 wake_up_partner(inode);
48779
48780- if (!pipe->readers) {
48781+ if (!atomic_read(&pipe->readers)) {
48782 wait_for_partner(inode, &pipe->r_counter);
48783 if (signal_pending(current))
48784 goto err_wr;
48785@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
48786 */
48787 filp->f_op = &rdwr_pipefifo_fops;
48788
48789- pipe->readers++;
48790- pipe->writers++;
48791+ atomic_inc(&pipe->readers);
48792+ atomic_inc(&pipe->writers);
48793 pipe->r_counter++;
48794 pipe->w_counter++;
48795- if (pipe->readers == 1 || pipe->writers == 1)
48796+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
48797 wake_up_partner(inode);
48798 break;
48799
48800@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
48801 return 0;
48802
48803 err_rd:
48804- if (!--pipe->readers)
48805+ if (atomic_dec_and_test(&pipe->readers))
48806 wake_up_interruptible(&pipe->wait);
48807 ret = -ERESTARTSYS;
48808 goto err;
48809
48810 err_wr:
48811- if (!--pipe->writers)
48812+ if (atomic_dec_and_test(&pipe->writers))
48813 wake_up_interruptible(&pipe->wait);
48814 ret = -ERESTARTSYS;
48815 goto err;
48816
48817 err:
48818- if (!pipe->readers && !pipe->writers)
48819+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
48820 free_pipe_info(inode);
48821
48822 err_nocleanup:
48823diff --git a/fs/file.c b/fs/file.c
48824index 87e1290..a930cc4 100644
48825--- a/fs/file.c
48826+++ b/fs/file.c
48827@@ -14,6 +14,7 @@
48828 #include <linux/slab.h>
48829 #include <linux/vmalloc.h>
48830 #include <linux/file.h>
48831+#include <linux/security.h>
48832 #include <linux/fdtable.h>
48833 #include <linux/bitops.h>
48834 #include <linux/interrupt.h>
48835@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
48836 * N.B. For clone tasks sharing a files structure, this test
48837 * will limit the total number of files that can be opened.
48838 */
48839+
48840+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
48841 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
48842 return -EMFILE;
48843
48844diff --git a/fs/filesystems.c b/fs/filesystems.c
48845index a24c58e..53f91ee 100644
48846--- a/fs/filesystems.c
48847+++ b/fs/filesystems.c
48848@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
48849 int len = dot ? dot - name : strlen(name);
48850
48851 fs = __get_fs_type(name, len);
48852+
48853+#ifdef CONFIG_GRKERNSEC_MODHARDEN
48854+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
48855+#else
48856 if (!fs && (request_module("%.*s", len, name) == 0))
48857+#endif
48858 fs = __get_fs_type(name, len);
48859
48860 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
48861diff --git a/fs/fs_struct.c b/fs/fs_struct.c
48862index eee0590..ef5bc0e 100644
48863--- a/fs/fs_struct.c
48864+++ b/fs/fs_struct.c
48865@@ -4,6 +4,7 @@
48866 #include <linux/path.h>
48867 #include <linux/slab.h>
48868 #include <linux/fs_struct.h>
48869+#include <linux/grsecurity.h>
48870
48871 /*
48872 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
48873@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
48874 old_root = fs->root;
48875 fs->root = *path;
48876 path_get(path);
48877+ gr_set_chroot_entries(current, path);
48878 write_unlock(&fs->lock);
48879 if (old_root.dentry)
48880 path_put(&old_root);
48881@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
48882 && fs->root.mnt == old_root->mnt) {
48883 path_get(new_root);
48884 fs->root = *new_root;
48885+ gr_set_chroot_entries(p, new_root);
48886 count++;
48887 }
48888 if (fs->pwd.dentry == old_root->dentry
48889@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
48890 task_lock(tsk);
48891 write_lock(&fs->lock);
48892 tsk->fs = NULL;
48893- kill = !--fs->users;
48894+ gr_clear_chroot_entries(tsk);
48895+ kill = !atomic_dec_return(&fs->users);
48896 write_unlock(&fs->lock);
48897 task_unlock(tsk);
48898 if (kill)
48899@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
48900 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
48901 /* We don't need to lock fs - think why ;-) */
48902 if (fs) {
48903- fs->users = 1;
48904+ atomic_set(&fs->users, 1);
48905 fs->in_exec = 0;
48906 rwlock_init(&fs->lock);
48907 fs->umask = old->umask;
48908@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
48909
48910 task_lock(current);
48911 write_lock(&fs->lock);
48912- kill = !--fs->users;
48913+ kill = !atomic_dec_return(&fs->users);
48914 current->fs = new_fs;
48915+ gr_set_chroot_entries(current, &new_fs->root);
48916 write_unlock(&fs->lock);
48917 task_unlock(current);
48918
48919@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
48920
48921 /* to be mentioned only in INIT_TASK */
48922 struct fs_struct init_fs = {
48923- .users = 1,
48924+ .users = ATOMIC_INIT(1),
48925 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
48926 .umask = 0022,
48927 };
48928@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
48929 task_lock(current);
48930
48931 write_lock(&init_fs.lock);
48932- init_fs.users++;
48933+ atomic_inc(&init_fs.users);
48934 write_unlock(&init_fs.lock);
48935
48936 write_lock(&fs->lock);
48937 current->fs = &init_fs;
48938- kill = !--fs->users;
48939+ gr_set_chroot_entries(current, &current->fs->root);
48940+ kill = !atomic_dec_return(&fs->users);
48941 write_unlock(&fs->lock);
48942
48943 task_unlock(current);
48944diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
48945index 9905350..02eaec4 100644
48946--- a/fs/fscache/cookie.c
48947+++ b/fs/fscache/cookie.c
48948@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
48949 parent ? (char *) parent->def->name : "<no-parent>",
48950 def->name, netfs_data);
48951
48952- fscache_stat(&fscache_n_acquires);
48953+ fscache_stat_unchecked(&fscache_n_acquires);
48954
48955 /* if there's no parent cookie, then we don't create one here either */
48956 if (!parent) {
48957- fscache_stat(&fscache_n_acquires_null);
48958+ fscache_stat_unchecked(&fscache_n_acquires_null);
48959 _leave(" [no parent]");
48960 return NULL;
48961 }
48962@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
48963 /* allocate and initialise a cookie */
48964 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
48965 if (!cookie) {
48966- fscache_stat(&fscache_n_acquires_oom);
48967+ fscache_stat_unchecked(&fscache_n_acquires_oom);
48968 _leave(" [ENOMEM]");
48969 return NULL;
48970 }
48971@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
48972
48973 switch (cookie->def->type) {
48974 case FSCACHE_COOKIE_TYPE_INDEX:
48975- fscache_stat(&fscache_n_cookie_index);
48976+ fscache_stat_unchecked(&fscache_n_cookie_index);
48977 break;
48978 case FSCACHE_COOKIE_TYPE_DATAFILE:
48979- fscache_stat(&fscache_n_cookie_data);
48980+ fscache_stat_unchecked(&fscache_n_cookie_data);
48981 break;
48982 default:
48983- fscache_stat(&fscache_n_cookie_special);
48984+ fscache_stat_unchecked(&fscache_n_cookie_special);
48985 break;
48986 }
48987
48988@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
48989 if (fscache_acquire_non_index_cookie(cookie) < 0) {
48990 atomic_dec(&parent->n_children);
48991 __fscache_cookie_put(cookie);
48992- fscache_stat(&fscache_n_acquires_nobufs);
48993+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
48994 _leave(" = NULL");
48995 return NULL;
48996 }
48997 }
48998
48999- fscache_stat(&fscache_n_acquires_ok);
49000+ fscache_stat_unchecked(&fscache_n_acquires_ok);
49001 _leave(" = %p", cookie);
49002 return cookie;
49003 }
49004@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49005 cache = fscache_select_cache_for_object(cookie->parent);
49006 if (!cache) {
49007 up_read(&fscache_addremove_sem);
49008- fscache_stat(&fscache_n_acquires_no_cache);
49009+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49010 _leave(" = -ENOMEDIUM [no cache]");
49011 return -ENOMEDIUM;
49012 }
49013@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49014 object = cache->ops->alloc_object(cache, cookie);
49015 fscache_stat_d(&fscache_n_cop_alloc_object);
49016 if (IS_ERR(object)) {
49017- fscache_stat(&fscache_n_object_no_alloc);
49018+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
49019 ret = PTR_ERR(object);
49020 goto error;
49021 }
49022
49023- fscache_stat(&fscache_n_object_alloc);
49024+ fscache_stat_unchecked(&fscache_n_object_alloc);
49025
49026 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49027
49028@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49029 struct fscache_object *object;
49030 struct hlist_node *_p;
49031
49032- fscache_stat(&fscache_n_updates);
49033+ fscache_stat_unchecked(&fscache_n_updates);
49034
49035 if (!cookie) {
49036- fscache_stat(&fscache_n_updates_null);
49037+ fscache_stat_unchecked(&fscache_n_updates_null);
49038 _leave(" [no cookie]");
49039 return;
49040 }
49041@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49042 struct fscache_object *object;
49043 unsigned long event;
49044
49045- fscache_stat(&fscache_n_relinquishes);
49046+ fscache_stat_unchecked(&fscache_n_relinquishes);
49047 if (retire)
49048- fscache_stat(&fscache_n_relinquishes_retire);
49049+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49050
49051 if (!cookie) {
49052- fscache_stat(&fscache_n_relinquishes_null);
49053+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
49054 _leave(" [no cookie]");
49055 return;
49056 }
49057@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49058
49059 /* wait for the cookie to finish being instantiated (or to fail) */
49060 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49061- fscache_stat(&fscache_n_relinquishes_waitcrt);
49062+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49063 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49064 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49065 }
49066diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49067index edd7434..0725e66 100644
49068--- a/fs/fscache/internal.h
49069+++ b/fs/fscache/internal.h
49070@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49071 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49072 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49073
49074-extern atomic_t fscache_n_op_pend;
49075-extern atomic_t fscache_n_op_run;
49076-extern atomic_t fscache_n_op_enqueue;
49077-extern atomic_t fscache_n_op_deferred_release;
49078-extern atomic_t fscache_n_op_release;
49079-extern atomic_t fscache_n_op_gc;
49080-extern atomic_t fscache_n_op_cancelled;
49081-extern atomic_t fscache_n_op_rejected;
49082-
49083-extern atomic_t fscache_n_attr_changed;
49084-extern atomic_t fscache_n_attr_changed_ok;
49085-extern atomic_t fscache_n_attr_changed_nobufs;
49086-extern atomic_t fscache_n_attr_changed_nomem;
49087-extern atomic_t fscache_n_attr_changed_calls;
49088-
49089-extern atomic_t fscache_n_allocs;
49090-extern atomic_t fscache_n_allocs_ok;
49091-extern atomic_t fscache_n_allocs_wait;
49092-extern atomic_t fscache_n_allocs_nobufs;
49093-extern atomic_t fscache_n_allocs_intr;
49094-extern atomic_t fscache_n_allocs_object_dead;
49095-extern atomic_t fscache_n_alloc_ops;
49096-extern atomic_t fscache_n_alloc_op_waits;
49097-
49098-extern atomic_t fscache_n_retrievals;
49099-extern atomic_t fscache_n_retrievals_ok;
49100-extern atomic_t fscache_n_retrievals_wait;
49101-extern atomic_t fscache_n_retrievals_nodata;
49102-extern atomic_t fscache_n_retrievals_nobufs;
49103-extern atomic_t fscache_n_retrievals_intr;
49104-extern atomic_t fscache_n_retrievals_nomem;
49105-extern atomic_t fscache_n_retrievals_object_dead;
49106-extern atomic_t fscache_n_retrieval_ops;
49107-extern atomic_t fscache_n_retrieval_op_waits;
49108-
49109-extern atomic_t fscache_n_stores;
49110-extern atomic_t fscache_n_stores_ok;
49111-extern atomic_t fscache_n_stores_again;
49112-extern atomic_t fscache_n_stores_nobufs;
49113-extern atomic_t fscache_n_stores_oom;
49114-extern atomic_t fscache_n_store_ops;
49115-extern atomic_t fscache_n_store_calls;
49116-extern atomic_t fscache_n_store_pages;
49117-extern atomic_t fscache_n_store_radix_deletes;
49118-extern atomic_t fscache_n_store_pages_over_limit;
49119-
49120-extern atomic_t fscache_n_store_vmscan_not_storing;
49121-extern atomic_t fscache_n_store_vmscan_gone;
49122-extern atomic_t fscache_n_store_vmscan_busy;
49123-extern atomic_t fscache_n_store_vmscan_cancelled;
49124-
49125-extern atomic_t fscache_n_marks;
49126-extern atomic_t fscache_n_uncaches;
49127-
49128-extern atomic_t fscache_n_acquires;
49129-extern atomic_t fscache_n_acquires_null;
49130-extern atomic_t fscache_n_acquires_no_cache;
49131-extern atomic_t fscache_n_acquires_ok;
49132-extern atomic_t fscache_n_acquires_nobufs;
49133-extern atomic_t fscache_n_acquires_oom;
49134-
49135-extern atomic_t fscache_n_updates;
49136-extern atomic_t fscache_n_updates_null;
49137-extern atomic_t fscache_n_updates_run;
49138-
49139-extern atomic_t fscache_n_relinquishes;
49140-extern atomic_t fscache_n_relinquishes_null;
49141-extern atomic_t fscache_n_relinquishes_waitcrt;
49142-extern atomic_t fscache_n_relinquishes_retire;
49143-
49144-extern atomic_t fscache_n_cookie_index;
49145-extern atomic_t fscache_n_cookie_data;
49146-extern atomic_t fscache_n_cookie_special;
49147-
49148-extern atomic_t fscache_n_object_alloc;
49149-extern atomic_t fscache_n_object_no_alloc;
49150-extern atomic_t fscache_n_object_lookups;
49151-extern atomic_t fscache_n_object_lookups_negative;
49152-extern atomic_t fscache_n_object_lookups_positive;
49153-extern atomic_t fscache_n_object_lookups_timed_out;
49154-extern atomic_t fscache_n_object_created;
49155-extern atomic_t fscache_n_object_avail;
49156-extern atomic_t fscache_n_object_dead;
49157-
49158-extern atomic_t fscache_n_checkaux_none;
49159-extern atomic_t fscache_n_checkaux_okay;
49160-extern atomic_t fscache_n_checkaux_update;
49161-extern atomic_t fscache_n_checkaux_obsolete;
49162+extern atomic_unchecked_t fscache_n_op_pend;
49163+extern atomic_unchecked_t fscache_n_op_run;
49164+extern atomic_unchecked_t fscache_n_op_enqueue;
49165+extern atomic_unchecked_t fscache_n_op_deferred_release;
49166+extern atomic_unchecked_t fscache_n_op_release;
49167+extern atomic_unchecked_t fscache_n_op_gc;
49168+extern atomic_unchecked_t fscache_n_op_cancelled;
49169+extern atomic_unchecked_t fscache_n_op_rejected;
49170+
49171+extern atomic_unchecked_t fscache_n_attr_changed;
49172+extern atomic_unchecked_t fscache_n_attr_changed_ok;
49173+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49174+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49175+extern atomic_unchecked_t fscache_n_attr_changed_calls;
49176+
49177+extern atomic_unchecked_t fscache_n_allocs;
49178+extern atomic_unchecked_t fscache_n_allocs_ok;
49179+extern atomic_unchecked_t fscache_n_allocs_wait;
49180+extern atomic_unchecked_t fscache_n_allocs_nobufs;
49181+extern atomic_unchecked_t fscache_n_allocs_intr;
49182+extern atomic_unchecked_t fscache_n_allocs_object_dead;
49183+extern atomic_unchecked_t fscache_n_alloc_ops;
49184+extern atomic_unchecked_t fscache_n_alloc_op_waits;
49185+
49186+extern atomic_unchecked_t fscache_n_retrievals;
49187+extern atomic_unchecked_t fscache_n_retrievals_ok;
49188+extern atomic_unchecked_t fscache_n_retrievals_wait;
49189+extern atomic_unchecked_t fscache_n_retrievals_nodata;
49190+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49191+extern atomic_unchecked_t fscache_n_retrievals_intr;
49192+extern atomic_unchecked_t fscache_n_retrievals_nomem;
49193+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49194+extern atomic_unchecked_t fscache_n_retrieval_ops;
49195+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49196+
49197+extern atomic_unchecked_t fscache_n_stores;
49198+extern atomic_unchecked_t fscache_n_stores_ok;
49199+extern atomic_unchecked_t fscache_n_stores_again;
49200+extern atomic_unchecked_t fscache_n_stores_nobufs;
49201+extern atomic_unchecked_t fscache_n_stores_oom;
49202+extern atomic_unchecked_t fscache_n_store_ops;
49203+extern atomic_unchecked_t fscache_n_store_calls;
49204+extern atomic_unchecked_t fscache_n_store_pages;
49205+extern atomic_unchecked_t fscache_n_store_radix_deletes;
49206+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49207+
49208+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49209+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49210+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49211+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49212+
49213+extern atomic_unchecked_t fscache_n_marks;
49214+extern atomic_unchecked_t fscache_n_uncaches;
49215+
49216+extern atomic_unchecked_t fscache_n_acquires;
49217+extern atomic_unchecked_t fscache_n_acquires_null;
49218+extern atomic_unchecked_t fscache_n_acquires_no_cache;
49219+extern atomic_unchecked_t fscache_n_acquires_ok;
49220+extern atomic_unchecked_t fscache_n_acquires_nobufs;
49221+extern atomic_unchecked_t fscache_n_acquires_oom;
49222+
49223+extern atomic_unchecked_t fscache_n_updates;
49224+extern atomic_unchecked_t fscache_n_updates_null;
49225+extern atomic_unchecked_t fscache_n_updates_run;
49226+
49227+extern atomic_unchecked_t fscache_n_relinquishes;
49228+extern atomic_unchecked_t fscache_n_relinquishes_null;
49229+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49230+extern atomic_unchecked_t fscache_n_relinquishes_retire;
49231+
49232+extern atomic_unchecked_t fscache_n_cookie_index;
49233+extern atomic_unchecked_t fscache_n_cookie_data;
49234+extern atomic_unchecked_t fscache_n_cookie_special;
49235+
49236+extern atomic_unchecked_t fscache_n_object_alloc;
49237+extern atomic_unchecked_t fscache_n_object_no_alloc;
49238+extern atomic_unchecked_t fscache_n_object_lookups;
49239+extern atomic_unchecked_t fscache_n_object_lookups_negative;
49240+extern atomic_unchecked_t fscache_n_object_lookups_positive;
49241+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49242+extern atomic_unchecked_t fscache_n_object_created;
49243+extern atomic_unchecked_t fscache_n_object_avail;
49244+extern atomic_unchecked_t fscache_n_object_dead;
49245+
49246+extern atomic_unchecked_t fscache_n_checkaux_none;
49247+extern atomic_unchecked_t fscache_n_checkaux_okay;
49248+extern atomic_unchecked_t fscache_n_checkaux_update;
49249+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49250
49251 extern atomic_t fscache_n_cop_alloc_object;
49252 extern atomic_t fscache_n_cop_lookup_object;
49253@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49254 atomic_inc(stat);
49255 }
49256
49257+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49258+{
49259+ atomic_inc_unchecked(stat);
49260+}
49261+
49262 static inline void fscache_stat_d(atomic_t *stat)
49263 {
49264 atomic_dec(stat);
49265@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49266
49267 #define __fscache_stat(stat) (NULL)
49268 #define fscache_stat(stat) do {} while (0)
49269+#define fscache_stat_unchecked(stat) do {} while (0)
49270 #define fscache_stat_d(stat) do {} while (0)
49271 #endif
49272
49273diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49274index e513ac5..e888d34 100644
49275--- a/fs/fscache/object.c
49276+++ b/fs/fscache/object.c
49277@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49278 /* update the object metadata on disk */
49279 case FSCACHE_OBJECT_UPDATING:
49280 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49281- fscache_stat(&fscache_n_updates_run);
49282+ fscache_stat_unchecked(&fscache_n_updates_run);
49283 fscache_stat(&fscache_n_cop_update_object);
49284 object->cache->ops->update_object(object);
49285 fscache_stat_d(&fscache_n_cop_update_object);
49286@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49287 spin_lock(&object->lock);
49288 object->state = FSCACHE_OBJECT_DEAD;
49289 spin_unlock(&object->lock);
49290- fscache_stat(&fscache_n_object_dead);
49291+ fscache_stat_unchecked(&fscache_n_object_dead);
49292 goto terminal_transit;
49293
49294 /* handle the parent cache of this object being withdrawn from
49295@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49296 spin_lock(&object->lock);
49297 object->state = FSCACHE_OBJECT_DEAD;
49298 spin_unlock(&object->lock);
49299- fscache_stat(&fscache_n_object_dead);
49300+ fscache_stat_unchecked(&fscache_n_object_dead);
49301 goto terminal_transit;
49302
49303 /* complain about the object being woken up once it is
49304@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49305 parent->cookie->def->name, cookie->def->name,
49306 object->cache->tag->name);
49307
49308- fscache_stat(&fscache_n_object_lookups);
49309+ fscache_stat_unchecked(&fscache_n_object_lookups);
49310 fscache_stat(&fscache_n_cop_lookup_object);
49311 ret = object->cache->ops->lookup_object(object);
49312 fscache_stat_d(&fscache_n_cop_lookup_object);
49313@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49314 if (ret == -ETIMEDOUT) {
49315 /* probably stuck behind another object, so move this one to
49316 * the back of the queue */
49317- fscache_stat(&fscache_n_object_lookups_timed_out);
49318+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49319 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49320 }
49321
49322@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49323
49324 spin_lock(&object->lock);
49325 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49326- fscache_stat(&fscache_n_object_lookups_negative);
49327+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49328
49329 /* transit here to allow write requests to begin stacking up
49330 * and read requests to begin returning ENODATA */
49331@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49332 * result, in which case there may be data available */
49333 spin_lock(&object->lock);
49334 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49335- fscache_stat(&fscache_n_object_lookups_positive);
49336+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49337
49338 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49339
49340@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49341 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49342 } else {
49343 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49344- fscache_stat(&fscache_n_object_created);
49345+ fscache_stat_unchecked(&fscache_n_object_created);
49346
49347 object->state = FSCACHE_OBJECT_AVAILABLE;
49348 spin_unlock(&object->lock);
49349@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49350 fscache_enqueue_dependents(object);
49351
49352 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49353- fscache_stat(&fscache_n_object_avail);
49354+ fscache_stat_unchecked(&fscache_n_object_avail);
49355
49356 _leave("");
49357 }
49358@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49359 enum fscache_checkaux result;
49360
49361 if (!object->cookie->def->check_aux) {
49362- fscache_stat(&fscache_n_checkaux_none);
49363+ fscache_stat_unchecked(&fscache_n_checkaux_none);
49364 return FSCACHE_CHECKAUX_OKAY;
49365 }
49366
49367@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49368 switch (result) {
49369 /* entry okay as is */
49370 case FSCACHE_CHECKAUX_OKAY:
49371- fscache_stat(&fscache_n_checkaux_okay);
49372+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
49373 break;
49374
49375 /* entry requires update */
49376 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49377- fscache_stat(&fscache_n_checkaux_update);
49378+ fscache_stat_unchecked(&fscache_n_checkaux_update);
49379 break;
49380
49381 /* entry requires deletion */
49382 case FSCACHE_CHECKAUX_OBSOLETE:
49383- fscache_stat(&fscache_n_checkaux_obsolete);
49384+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49385 break;
49386
49387 default:
49388diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49389index 313e79a..775240f 100644
49390--- a/fs/fscache/operation.c
49391+++ b/fs/fscache/operation.c
49392@@ -16,7 +16,7 @@
49393 #include <linux/seq_file.h>
49394 #include "internal.h"
49395
49396-atomic_t fscache_op_debug_id;
49397+atomic_unchecked_t fscache_op_debug_id;
49398 EXPORT_SYMBOL(fscache_op_debug_id);
49399
49400 /**
49401@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49402 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
49403 ASSERTCMP(atomic_read(&op->usage), >, 0);
49404
49405- fscache_stat(&fscache_n_op_enqueue);
49406+ fscache_stat_unchecked(&fscache_n_op_enqueue);
49407 switch (op->flags & FSCACHE_OP_TYPE) {
49408 case FSCACHE_OP_FAST:
49409 _debug("queue fast");
49410@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
49411 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49412 if (op->processor)
49413 fscache_enqueue_operation(op);
49414- fscache_stat(&fscache_n_op_run);
49415+ fscache_stat_unchecked(&fscache_n_op_run);
49416 }
49417
49418 /*
49419@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49420 if (object->n_ops > 0) {
49421 atomic_inc(&op->usage);
49422 list_add_tail(&op->pend_link, &object->pending_ops);
49423- fscache_stat(&fscache_n_op_pend);
49424+ fscache_stat_unchecked(&fscache_n_op_pend);
49425 } else if (!list_empty(&object->pending_ops)) {
49426 atomic_inc(&op->usage);
49427 list_add_tail(&op->pend_link, &object->pending_ops);
49428- fscache_stat(&fscache_n_op_pend);
49429+ fscache_stat_unchecked(&fscache_n_op_pend);
49430 fscache_start_operations(object);
49431 } else {
49432 ASSERTCMP(object->n_in_progress, ==, 0);
49433@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49434 object->n_exclusive++; /* reads and writes must wait */
49435 atomic_inc(&op->usage);
49436 list_add_tail(&op->pend_link, &object->pending_ops);
49437- fscache_stat(&fscache_n_op_pend);
49438+ fscache_stat_unchecked(&fscache_n_op_pend);
49439 ret = 0;
49440 } else {
49441 /* not allowed to submit ops in any other state */
49442@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
49443 if (object->n_exclusive > 0) {
49444 atomic_inc(&op->usage);
49445 list_add_tail(&op->pend_link, &object->pending_ops);
49446- fscache_stat(&fscache_n_op_pend);
49447+ fscache_stat_unchecked(&fscache_n_op_pend);
49448 } else if (!list_empty(&object->pending_ops)) {
49449 atomic_inc(&op->usage);
49450 list_add_tail(&op->pend_link, &object->pending_ops);
49451- fscache_stat(&fscache_n_op_pend);
49452+ fscache_stat_unchecked(&fscache_n_op_pend);
49453 fscache_start_operations(object);
49454 } else {
49455 ASSERTCMP(object->n_exclusive, ==, 0);
49456@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
49457 object->n_ops++;
49458 atomic_inc(&op->usage);
49459 list_add_tail(&op->pend_link, &object->pending_ops);
49460- fscache_stat(&fscache_n_op_pend);
49461+ fscache_stat_unchecked(&fscache_n_op_pend);
49462 ret = 0;
49463 } else if (object->state == FSCACHE_OBJECT_DYING ||
49464 object->state == FSCACHE_OBJECT_LC_DYING ||
49465 object->state == FSCACHE_OBJECT_WITHDRAWING) {
49466- fscache_stat(&fscache_n_op_rejected);
49467+ fscache_stat_unchecked(&fscache_n_op_rejected);
49468 ret = -ENOBUFS;
49469 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
49470 fscache_report_unexpected_submission(object, op, ostate);
49471@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
49472
49473 ret = -EBUSY;
49474 if (!list_empty(&op->pend_link)) {
49475- fscache_stat(&fscache_n_op_cancelled);
49476+ fscache_stat_unchecked(&fscache_n_op_cancelled);
49477 list_del_init(&op->pend_link);
49478 object->n_ops--;
49479 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
49480@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
49481 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
49482 BUG();
49483
49484- fscache_stat(&fscache_n_op_release);
49485+ fscache_stat_unchecked(&fscache_n_op_release);
49486
49487 if (op->release) {
49488 op->release(op);
49489@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
49490 * lock, and defer it otherwise */
49491 if (!spin_trylock(&object->lock)) {
49492 _debug("defer put");
49493- fscache_stat(&fscache_n_op_deferred_release);
49494+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
49495
49496 cache = object->cache;
49497 spin_lock(&cache->op_gc_list_lock);
49498@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
49499
49500 _debug("GC DEFERRED REL OBJ%x OP%x",
49501 object->debug_id, op->debug_id);
49502- fscache_stat(&fscache_n_op_gc);
49503+ fscache_stat_unchecked(&fscache_n_op_gc);
49504
49505 ASSERTCMP(atomic_read(&op->usage), ==, 0);
49506
49507diff --git a/fs/fscache/page.c b/fs/fscache/page.c
49508index c598ea4..6aac13e 100644
49509--- a/fs/fscache/page.c
49510+++ b/fs/fscache/page.c
49511@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49512 val = radix_tree_lookup(&cookie->stores, page->index);
49513 if (!val) {
49514 rcu_read_unlock();
49515- fscache_stat(&fscache_n_store_vmscan_not_storing);
49516+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
49517 __fscache_uncache_page(cookie, page);
49518 return true;
49519 }
49520@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49521 spin_unlock(&cookie->stores_lock);
49522
49523 if (xpage) {
49524- fscache_stat(&fscache_n_store_vmscan_cancelled);
49525- fscache_stat(&fscache_n_store_radix_deletes);
49526+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
49527+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49528 ASSERTCMP(xpage, ==, page);
49529 } else {
49530- fscache_stat(&fscache_n_store_vmscan_gone);
49531+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
49532 }
49533
49534 wake_up_bit(&cookie->flags, 0);
49535@@ -106,7 +106,7 @@ page_busy:
49536 /* we might want to wait here, but that could deadlock the allocator as
49537 * the slow-work threads writing to the cache may all end up sleeping
49538 * on memory allocation */
49539- fscache_stat(&fscache_n_store_vmscan_busy);
49540+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
49541 return false;
49542 }
49543 EXPORT_SYMBOL(__fscache_maybe_release_page);
49544@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
49545 FSCACHE_COOKIE_STORING_TAG);
49546 if (!radix_tree_tag_get(&cookie->stores, page->index,
49547 FSCACHE_COOKIE_PENDING_TAG)) {
49548- fscache_stat(&fscache_n_store_radix_deletes);
49549+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49550 xpage = radix_tree_delete(&cookie->stores, page->index);
49551 }
49552 spin_unlock(&cookie->stores_lock);
49553@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
49554
49555 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
49556
49557- fscache_stat(&fscache_n_attr_changed_calls);
49558+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
49559
49560 if (fscache_object_is_active(object)) {
49561 fscache_set_op_state(op, "CallFS");
49562@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49563
49564 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49565
49566- fscache_stat(&fscache_n_attr_changed);
49567+ fscache_stat_unchecked(&fscache_n_attr_changed);
49568
49569 op = kzalloc(sizeof(*op), GFP_KERNEL);
49570 if (!op) {
49571- fscache_stat(&fscache_n_attr_changed_nomem);
49572+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
49573 _leave(" = -ENOMEM");
49574 return -ENOMEM;
49575 }
49576@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49577 if (fscache_submit_exclusive_op(object, op) < 0)
49578 goto nobufs;
49579 spin_unlock(&cookie->lock);
49580- fscache_stat(&fscache_n_attr_changed_ok);
49581+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
49582 fscache_put_operation(op);
49583 _leave(" = 0");
49584 return 0;
49585@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49586 nobufs:
49587 spin_unlock(&cookie->lock);
49588 kfree(op);
49589- fscache_stat(&fscache_n_attr_changed_nobufs);
49590+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
49591 _leave(" = %d", -ENOBUFS);
49592 return -ENOBUFS;
49593 }
49594@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
49595 /* allocate a retrieval operation and attempt to submit it */
49596 op = kzalloc(sizeof(*op), GFP_NOIO);
49597 if (!op) {
49598- fscache_stat(&fscache_n_retrievals_nomem);
49599+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49600 return NULL;
49601 }
49602
49603@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49604 return 0;
49605 }
49606
49607- fscache_stat(&fscache_n_retrievals_wait);
49608+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
49609
49610 jif = jiffies;
49611 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
49612 fscache_wait_bit_interruptible,
49613 TASK_INTERRUPTIBLE) != 0) {
49614- fscache_stat(&fscache_n_retrievals_intr);
49615+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49616 _leave(" = -ERESTARTSYS");
49617 return -ERESTARTSYS;
49618 }
49619@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49620 */
49621 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49622 struct fscache_retrieval *op,
49623- atomic_t *stat_op_waits,
49624- atomic_t *stat_object_dead)
49625+ atomic_unchecked_t *stat_op_waits,
49626+ atomic_unchecked_t *stat_object_dead)
49627 {
49628 int ret;
49629
49630@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49631 goto check_if_dead;
49632
49633 _debug(">>> WT");
49634- fscache_stat(stat_op_waits);
49635+ fscache_stat_unchecked(stat_op_waits);
49636 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
49637 fscache_wait_bit_interruptible,
49638 TASK_INTERRUPTIBLE) < 0) {
49639@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49640
49641 check_if_dead:
49642 if (unlikely(fscache_object_is_dead(object))) {
49643- fscache_stat(stat_object_dead);
49644+ fscache_stat_unchecked(stat_object_dead);
49645 return -ENOBUFS;
49646 }
49647 return 0;
49648@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49649
49650 _enter("%p,%p,,,", cookie, page);
49651
49652- fscache_stat(&fscache_n_retrievals);
49653+ fscache_stat_unchecked(&fscache_n_retrievals);
49654
49655 if (hlist_empty(&cookie->backing_objects))
49656 goto nobufs;
49657@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49658 goto nobufs_unlock;
49659 spin_unlock(&cookie->lock);
49660
49661- fscache_stat(&fscache_n_retrieval_ops);
49662+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
49663
49664 /* pin the netfs read context in case we need to do the actual netfs
49665 * read because we've encountered a cache read failure */
49666@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49667
49668 error:
49669 if (ret == -ENOMEM)
49670- fscache_stat(&fscache_n_retrievals_nomem);
49671+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49672 else if (ret == -ERESTARTSYS)
49673- fscache_stat(&fscache_n_retrievals_intr);
49674+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49675 else if (ret == -ENODATA)
49676- fscache_stat(&fscache_n_retrievals_nodata);
49677+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49678 else if (ret < 0)
49679- fscache_stat(&fscache_n_retrievals_nobufs);
49680+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49681 else
49682- fscache_stat(&fscache_n_retrievals_ok);
49683+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
49684
49685 fscache_put_retrieval(op);
49686 _leave(" = %d", ret);
49687@@ -453,7 +453,7 @@ nobufs_unlock:
49688 spin_unlock(&cookie->lock);
49689 kfree(op);
49690 nobufs:
49691- fscache_stat(&fscache_n_retrievals_nobufs);
49692+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49693 _leave(" = -ENOBUFS");
49694 return -ENOBUFS;
49695 }
49696@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49697
49698 _enter("%p,,%d,,,", cookie, *nr_pages);
49699
49700- fscache_stat(&fscache_n_retrievals);
49701+ fscache_stat_unchecked(&fscache_n_retrievals);
49702
49703 if (hlist_empty(&cookie->backing_objects))
49704 goto nobufs;
49705@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49706 goto nobufs_unlock;
49707 spin_unlock(&cookie->lock);
49708
49709- fscache_stat(&fscache_n_retrieval_ops);
49710+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
49711
49712 /* pin the netfs read context in case we need to do the actual netfs
49713 * read because we've encountered a cache read failure */
49714@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49715
49716 error:
49717 if (ret == -ENOMEM)
49718- fscache_stat(&fscache_n_retrievals_nomem);
49719+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49720 else if (ret == -ERESTARTSYS)
49721- fscache_stat(&fscache_n_retrievals_intr);
49722+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49723 else if (ret == -ENODATA)
49724- fscache_stat(&fscache_n_retrievals_nodata);
49725+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49726 else if (ret < 0)
49727- fscache_stat(&fscache_n_retrievals_nobufs);
49728+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49729 else
49730- fscache_stat(&fscache_n_retrievals_ok);
49731+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
49732
49733 fscache_put_retrieval(op);
49734 _leave(" = %d", ret);
49735@@ -570,7 +570,7 @@ nobufs_unlock:
49736 spin_unlock(&cookie->lock);
49737 kfree(op);
49738 nobufs:
49739- fscache_stat(&fscache_n_retrievals_nobufs);
49740+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49741 _leave(" = -ENOBUFS");
49742 return -ENOBUFS;
49743 }
49744@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49745
49746 _enter("%p,%p,,,", cookie, page);
49747
49748- fscache_stat(&fscache_n_allocs);
49749+ fscache_stat_unchecked(&fscache_n_allocs);
49750
49751 if (hlist_empty(&cookie->backing_objects))
49752 goto nobufs;
49753@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49754 goto nobufs_unlock;
49755 spin_unlock(&cookie->lock);
49756
49757- fscache_stat(&fscache_n_alloc_ops);
49758+ fscache_stat_unchecked(&fscache_n_alloc_ops);
49759
49760 ret = fscache_wait_for_retrieval_activation(
49761 object, op,
49762@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49763
49764 error:
49765 if (ret == -ERESTARTSYS)
49766- fscache_stat(&fscache_n_allocs_intr);
49767+ fscache_stat_unchecked(&fscache_n_allocs_intr);
49768 else if (ret < 0)
49769- fscache_stat(&fscache_n_allocs_nobufs);
49770+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49771 else
49772- fscache_stat(&fscache_n_allocs_ok);
49773+ fscache_stat_unchecked(&fscache_n_allocs_ok);
49774
49775 fscache_put_retrieval(op);
49776 _leave(" = %d", ret);
49777@@ -651,7 +651,7 @@ nobufs_unlock:
49778 spin_unlock(&cookie->lock);
49779 kfree(op);
49780 nobufs:
49781- fscache_stat(&fscache_n_allocs_nobufs);
49782+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49783 _leave(" = -ENOBUFS");
49784 return -ENOBUFS;
49785 }
49786@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49787
49788 spin_lock(&cookie->stores_lock);
49789
49790- fscache_stat(&fscache_n_store_calls);
49791+ fscache_stat_unchecked(&fscache_n_store_calls);
49792
49793 /* find a page to store */
49794 page = NULL;
49795@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49796 page = results[0];
49797 _debug("gang %d [%lx]", n, page->index);
49798 if (page->index > op->store_limit) {
49799- fscache_stat(&fscache_n_store_pages_over_limit);
49800+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
49801 goto superseded;
49802 }
49803
49804@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49805
49806 if (page) {
49807 fscache_set_op_state(&op->op, "Store");
49808- fscache_stat(&fscache_n_store_pages);
49809+ fscache_stat_unchecked(&fscache_n_store_pages);
49810 fscache_stat(&fscache_n_cop_write_page);
49811 ret = object->cache->ops->write_page(op, page);
49812 fscache_stat_d(&fscache_n_cop_write_page);
49813@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49814 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49815 ASSERT(PageFsCache(page));
49816
49817- fscache_stat(&fscache_n_stores);
49818+ fscache_stat_unchecked(&fscache_n_stores);
49819
49820 op = kzalloc(sizeof(*op), GFP_NOIO);
49821 if (!op)
49822@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49823 spin_unlock(&cookie->stores_lock);
49824 spin_unlock(&object->lock);
49825
49826- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
49827+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
49828 op->store_limit = object->store_limit;
49829
49830 if (fscache_submit_op(object, &op->op) < 0)
49831@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49832
49833 spin_unlock(&cookie->lock);
49834 radix_tree_preload_end();
49835- fscache_stat(&fscache_n_store_ops);
49836- fscache_stat(&fscache_n_stores_ok);
49837+ fscache_stat_unchecked(&fscache_n_store_ops);
49838+ fscache_stat_unchecked(&fscache_n_stores_ok);
49839
49840 /* the slow work queue now carries its own ref on the object */
49841 fscache_put_operation(&op->op);
49842@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49843 return 0;
49844
49845 already_queued:
49846- fscache_stat(&fscache_n_stores_again);
49847+ fscache_stat_unchecked(&fscache_n_stores_again);
49848 already_pending:
49849 spin_unlock(&cookie->stores_lock);
49850 spin_unlock(&object->lock);
49851 spin_unlock(&cookie->lock);
49852 radix_tree_preload_end();
49853 kfree(op);
49854- fscache_stat(&fscache_n_stores_ok);
49855+ fscache_stat_unchecked(&fscache_n_stores_ok);
49856 _leave(" = 0");
49857 return 0;
49858
49859@@ -886,14 +886,14 @@ nobufs:
49860 spin_unlock(&cookie->lock);
49861 radix_tree_preload_end();
49862 kfree(op);
49863- fscache_stat(&fscache_n_stores_nobufs);
49864+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
49865 _leave(" = -ENOBUFS");
49866 return -ENOBUFS;
49867
49868 nomem_free:
49869 kfree(op);
49870 nomem:
49871- fscache_stat(&fscache_n_stores_oom);
49872+ fscache_stat_unchecked(&fscache_n_stores_oom);
49873 _leave(" = -ENOMEM");
49874 return -ENOMEM;
49875 }
49876@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
49877 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49878 ASSERTCMP(page, !=, NULL);
49879
49880- fscache_stat(&fscache_n_uncaches);
49881+ fscache_stat_unchecked(&fscache_n_uncaches);
49882
49883 /* cache withdrawal may beat us to it */
49884 if (!PageFsCache(page))
49885@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
49886 unsigned long loop;
49887
49888 #ifdef CONFIG_FSCACHE_STATS
49889- atomic_add(pagevec->nr, &fscache_n_marks);
49890+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
49891 #endif
49892
49893 for (loop = 0; loop < pagevec->nr; loop++) {
49894diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
49895index 46435f3..8cddf18 100644
49896--- a/fs/fscache/stats.c
49897+++ b/fs/fscache/stats.c
49898@@ -18,95 +18,95 @@
49899 /*
49900 * operation counters
49901 */
49902-atomic_t fscache_n_op_pend;
49903-atomic_t fscache_n_op_run;
49904-atomic_t fscache_n_op_enqueue;
49905-atomic_t fscache_n_op_requeue;
49906-atomic_t fscache_n_op_deferred_release;
49907-atomic_t fscache_n_op_release;
49908-atomic_t fscache_n_op_gc;
49909-atomic_t fscache_n_op_cancelled;
49910-atomic_t fscache_n_op_rejected;
49911-
49912-atomic_t fscache_n_attr_changed;
49913-atomic_t fscache_n_attr_changed_ok;
49914-atomic_t fscache_n_attr_changed_nobufs;
49915-atomic_t fscache_n_attr_changed_nomem;
49916-atomic_t fscache_n_attr_changed_calls;
49917-
49918-atomic_t fscache_n_allocs;
49919-atomic_t fscache_n_allocs_ok;
49920-atomic_t fscache_n_allocs_wait;
49921-atomic_t fscache_n_allocs_nobufs;
49922-atomic_t fscache_n_allocs_intr;
49923-atomic_t fscache_n_allocs_object_dead;
49924-atomic_t fscache_n_alloc_ops;
49925-atomic_t fscache_n_alloc_op_waits;
49926-
49927-atomic_t fscache_n_retrievals;
49928-atomic_t fscache_n_retrievals_ok;
49929-atomic_t fscache_n_retrievals_wait;
49930-atomic_t fscache_n_retrievals_nodata;
49931-atomic_t fscache_n_retrievals_nobufs;
49932-atomic_t fscache_n_retrievals_intr;
49933-atomic_t fscache_n_retrievals_nomem;
49934-atomic_t fscache_n_retrievals_object_dead;
49935-atomic_t fscache_n_retrieval_ops;
49936-atomic_t fscache_n_retrieval_op_waits;
49937-
49938-atomic_t fscache_n_stores;
49939-atomic_t fscache_n_stores_ok;
49940-atomic_t fscache_n_stores_again;
49941-atomic_t fscache_n_stores_nobufs;
49942-atomic_t fscache_n_stores_oom;
49943-atomic_t fscache_n_store_ops;
49944-atomic_t fscache_n_store_calls;
49945-atomic_t fscache_n_store_pages;
49946-atomic_t fscache_n_store_radix_deletes;
49947-atomic_t fscache_n_store_pages_over_limit;
49948-
49949-atomic_t fscache_n_store_vmscan_not_storing;
49950-atomic_t fscache_n_store_vmscan_gone;
49951-atomic_t fscache_n_store_vmscan_busy;
49952-atomic_t fscache_n_store_vmscan_cancelled;
49953-
49954-atomic_t fscache_n_marks;
49955-atomic_t fscache_n_uncaches;
49956-
49957-atomic_t fscache_n_acquires;
49958-atomic_t fscache_n_acquires_null;
49959-atomic_t fscache_n_acquires_no_cache;
49960-atomic_t fscache_n_acquires_ok;
49961-atomic_t fscache_n_acquires_nobufs;
49962-atomic_t fscache_n_acquires_oom;
49963-
49964-atomic_t fscache_n_updates;
49965-atomic_t fscache_n_updates_null;
49966-atomic_t fscache_n_updates_run;
49967-
49968-atomic_t fscache_n_relinquishes;
49969-atomic_t fscache_n_relinquishes_null;
49970-atomic_t fscache_n_relinquishes_waitcrt;
49971-atomic_t fscache_n_relinquishes_retire;
49972-
49973-atomic_t fscache_n_cookie_index;
49974-atomic_t fscache_n_cookie_data;
49975-atomic_t fscache_n_cookie_special;
49976-
49977-atomic_t fscache_n_object_alloc;
49978-atomic_t fscache_n_object_no_alloc;
49979-atomic_t fscache_n_object_lookups;
49980-atomic_t fscache_n_object_lookups_negative;
49981-atomic_t fscache_n_object_lookups_positive;
49982-atomic_t fscache_n_object_lookups_timed_out;
49983-atomic_t fscache_n_object_created;
49984-atomic_t fscache_n_object_avail;
49985-atomic_t fscache_n_object_dead;
49986-
49987-atomic_t fscache_n_checkaux_none;
49988-atomic_t fscache_n_checkaux_okay;
49989-atomic_t fscache_n_checkaux_update;
49990-atomic_t fscache_n_checkaux_obsolete;
49991+atomic_unchecked_t fscache_n_op_pend;
49992+atomic_unchecked_t fscache_n_op_run;
49993+atomic_unchecked_t fscache_n_op_enqueue;
49994+atomic_unchecked_t fscache_n_op_requeue;
49995+atomic_unchecked_t fscache_n_op_deferred_release;
49996+atomic_unchecked_t fscache_n_op_release;
49997+atomic_unchecked_t fscache_n_op_gc;
49998+atomic_unchecked_t fscache_n_op_cancelled;
49999+atomic_unchecked_t fscache_n_op_rejected;
50000+
50001+atomic_unchecked_t fscache_n_attr_changed;
50002+atomic_unchecked_t fscache_n_attr_changed_ok;
50003+atomic_unchecked_t fscache_n_attr_changed_nobufs;
50004+atomic_unchecked_t fscache_n_attr_changed_nomem;
50005+atomic_unchecked_t fscache_n_attr_changed_calls;
50006+
50007+atomic_unchecked_t fscache_n_allocs;
50008+atomic_unchecked_t fscache_n_allocs_ok;
50009+atomic_unchecked_t fscache_n_allocs_wait;
50010+atomic_unchecked_t fscache_n_allocs_nobufs;
50011+atomic_unchecked_t fscache_n_allocs_intr;
50012+atomic_unchecked_t fscache_n_allocs_object_dead;
50013+atomic_unchecked_t fscache_n_alloc_ops;
50014+atomic_unchecked_t fscache_n_alloc_op_waits;
50015+
50016+atomic_unchecked_t fscache_n_retrievals;
50017+atomic_unchecked_t fscache_n_retrievals_ok;
50018+atomic_unchecked_t fscache_n_retrievals_wait;
50019+atomic_unchecked_t fscache_n_retrievals_nodata;
50020+atomic_unchecked_t fscache_n_retrievals_nobufs;
50021+atomic_unchecked_t fscache_n_retrievals_intr;
50022+atomic_unchecked_t fscache_n_retrievals_nomem;
50023+atomic_unchecked_t fscache_n_retrievals_object_dead;
50024+atomic_unchecked_t fscache_n_retrieval_ops;
50025+atomic_unchecked_t fscache_n_retrieval_op_waits;
50026+
50027+atomic_unchecked_t fscache_n_stores;
50028+atomic_unchecked_t fscache_n_stores_ok;
50029+atomic_unchecked_t fscache_n_stores_again;
50030+atomic_unchecked_t fscache_n_stores_nobufs;
50031+atomic_unchecked_t fscache_n_stores_oom;
50032+atomic_unchecked_t fscache_n_store_ops;
50033+atomic_unchecked_t fscache_n_store_calls;
50034+atomic_unchecked_t fscache_n_store_pages;
50035+atomic_unchecked_t fscache_n_store_radix_deletes;
50036+atomic_unchecked_t fscache_n_store_pages_over_limit;
50037+
50038+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50039+atomic_unchecked_t fscache_n_store_vmscan_gone;
50040+atomic_unchecked_t fscache_n_store_vmscan_busy;
50041+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50042+
50043+atomic_unchecked_t fscache_n_marks;
50044+atomic_unchecked_t fscache_n_uncaches;
50045+
50046+atomic_unchecked_t fscache_n_acquires;
50047+atomic_unchecked_t fscache_n_acquires_null;
50048+atomic_unchecked_t fscache_n_acquires_no_cache;
50049+atomic_unchecked_t fscache_n_acquires_ok;
50050+atomic_unchecked_t fscache_n_acquires_nobufs;
50051+atomic_unchecked_t fscache_n_acquires_oom;
50052+
50053+atomic_unchecked_t fscache_n_updates;
50054+atomic_unchecked_t fscache_n_updates_null;
50055+atomic_unchecked_t fscache_n_updates_run;
50056+
50057+atomic_unchecked_t fscache_n_relinquishes;
50058+atomic_unchecked_t fscache_n_relinquishes_null;
50059+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50060+atomic_unchecked_t fscache_n_relinquishes_retire;
50061+
50062+atomic_unchecked_t fscache_n_cookie_index;
50063+atomic_unchecked_t fscache_n_cookie_data;
50064+atomic_unchecked_t fscache_n_cookie_special;
50065+
50066+atomic_unchecked_t fscache_n_object_alloc;
50067+atomic_unchecked_t fscache_n_object_no_alloc;
50068+atomic_unchecked_t fscache_n_object_lookups;
50069+atomic_unchecked_t fscache_n_object_lookups_negative;
50070+atomic_unchecked_t fscache_n_object_lookups_positive;
50071+atomic_unchecked_t fscache_n_object_lookups_timed_out;
50072+atomic_unchecked_t fscache_n_object_created;
50073+atomic_unchecked_t fscache_n_object_avail;
50074+atomic_unchecked_t fscache_n_object_dead;
50075+
50076+atomic_unchecked_t fscache_n_checkaux_none;
50077+atomic_unchecked_t fscache_n_checkaux_okay;
50078+atomic_unchecked_t fscache_n_checkaux_update;
50079+atomic_unchecked_t fscache_n_checkaux_obsolete;
50080
50081 atomic_t fscache_n_cop_alloc_object;
50082 atomic_t fscache_n_cop_lookup_object;
50083@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50084 seq_puts(m, "FS-Cache statistics\n");
50085
50086 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50087- atomic_read(&fscache_n_cookie_index),
50088- atomic_read(&fscache_n_cookie_data),
50089- atomic_read(&fscache_n_cookie_special));
50090+ atomic_read_unchecked(&fscache_n_cookie_index),
50091+ atomic_read_unchecked(&fscache_n_cookie_data),
50092+ atomic_read_unchecked(&fscache_n_cookie_special));
50093
50094 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50095- atomic_read(&fscache_n_object_alloc),
50096- atomic_read(&fscache_n_object_no_alloc),
50097- atomic_read(&fscache_n_object_avail),
50098- atomic_read(&fscache_n_object_dead));
50099+ atomic_read_unchecked(&fscache_n_object_alloc),
50100+ atomic_read_unchecked(&fscache_n_object_no_alloc),
50101+ atomic_read_unchecked(&fscache_n_object_avail),
50102+ atomic_read_unchecked(&fscache_n_object_dead));
50103 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50104- atomic_read(&fscache_n_checkaux_none),
50105- atomic_read(&fscache_n_checkaux_okay),
50106- atomic_read(&fscache_n_checkaux_update),
50107- atomic_read(&fscache_n_checkaux_obsolete));
50108+ atomic_read_unchecked(&fscache_n_checkaux_none),
50109+ atomic_read_unchecked(&fscache_n_checkaux_okay),
50110+ atomic_read_unchecked(&fscache_n_checkaux_update),
50111+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50112
50113 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50114- atomic_read(&fscache_n_marks),
50115- atomic_read(&fscache_n_uncaches));
50116+ atomic_read_unchecked(&fscache_n_marks),
50117+ atomic_read_unchecked(&fscache_n_uncaches));
50118
50119 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50120 " oom=%u\n",
50121- atomic_read(&fscache_n_acquires),
50122- atomic_read(&fscache_n_acquires_null),
50123- atomic_read(&fscache_n_acquires_no_cache),
50124- atomic_read(&fscache_n_acquires_ok),
50125- atomic_read(&fscache_n_acquires_nobufs),
50126- atomic_read(&fscache_n_acquires_oom));
50127+ atomic_read_unchecked(&fscache_n_acquires),
50128+ atomic_read_unchecked(&fscache_n_acquires_null),
50129+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
50130+ atomic_read_unchecked(&fscache_n_acquires_ok),
50131+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
50132+ atomic_read_unchecked(&fscache_n_acquires_oom));
50133
50134 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50135- atomic_read(&fscache_n_object_lookups),
50136- atomic_read(&fscache_n_object_lookups_negative),
50137- atomic_read(&fscache_n_object_lookups_positive),
50138- atomic_read(&fscache_n_object_lookups_timed_out),
50139- atomic_read(&fscache_n_object_created));
50140+ atomic_read_unchecked(&fscache_n_object_lookups),
50141+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
50142+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
50143+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50144+ atomic_read_unchecked(&fscache_n_object_created));
50145
50146 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50147- atomic_read(&fscache_n_updates),
50148- atomic_read(&fscache_n_updates_null),
50149- atomic_read(&fscache_n_updates_run));
50150+ atomic_read_unchecked(&fscache_n_updates),
50151+ atomic_read_unchecked(&fscache_n_updates_null),
50152+ atomic_read_unchecked(&fscache_n_updates_run));
50153
50154 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50155- atomic_read(&fscache_n_relinquishes),
50156- atomic_read(&fscache_n_relinquishes_null),
50157- atomic_read(&fscache_n_relinquishes_waitcrt),
50158- atomic_read(&fscache_n_relinquishes_retire));
50159+ atomic_read_unchecked(&fscache_n_relinquishes),
50160+ atomic_read_unchecked(&fscache_n_relinquishes_null),
50161+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50162+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
50163
50164 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50165- atomic_read(&fscache_n_attr_changed),
50166- atomic_read(&fscache_n_attr_changed_ok),
50167- atomic_read(&fscache_n_attr_changed_nobufs),
50168- atomic_read(&fscache_n_attr_changed_nomem),
50169- atomic_read(&fscache_n_attr_changed_calls));
50170+ atomic_read_unchecked(&fscache_n_attr_changed),
50171+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
50172+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50173+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50174+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
50175
50176 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50177- atomic_read(&fscache_n_allocs),
50178- atomic_read(&fscache_n_allocs_ok),
50179- atomic_read(&fscache_n_allocs_wait),
50180- atomic_read(&fscache_n_allocs_nobufs),
50181- atomic_read(&fscache_n_allocs_intr));
50182+ atomic_read_unchecked(&fscache_n_allocs),
50183+ atomic_read_unchecked(&fscache_n_allocs_ok),
50184+ atomic_read_unchecked(&fscache_n_allocs_wait),
50185+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
50186+ atomic_read_unchecked(&fscache_n_allocs_intr));
50187 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50188- atomic_read(&fscache_n_alloc_ops),
50189- atomic_read(&fscache_n_alloc_op_waits),
50190- atomic_read(&fscache_n_allocs_object_dead));
50191+ atomic_read_unchecked(&fscache_n_alloc_ops),
50192+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
50193+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
50194
50195 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50196 " int=%u oom=%u\n",
50197- atomic_read(&fscache_n_retrievals),
50198- atomic_read(&fscache_n_retrievals_ok),
50199- atomic_read(&fscache_n_retrievals_wait),
50200- atomic_read(&fscache_n_retrievals_nodata),
50201- atomic_read(&fscache_n_retrievals_nobufs),
50202- atomic_read(&fscache_n_retrievals_intr),
50203- atomic_read(&fscache_n_retrievals_nomem));
50204+ atomic_read_unchecked(&fscache_n_retrievals),
50205+ atomic_read_unchecked(&fscache_n_retrievals_ok),
50206+ atomic_read_unchecked(&fscache_n_retrievals_wait),
50207+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
50208+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50209+ atomic_read_unchecked(&fscache_n_retrievals_intr),
50210+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
50211 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50212- atomic_read(&fscache_n_retrieval_ops),
50213- atomic_read(&fscache_n_retrieval_op_waits),
50214- atomic_read(&fscache_n_retrievals_object_dead));
50215+ atomic_read_unchecked(&fscache_n_retrieval_ops),
50216+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50217+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50218
50219 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50220- atomic_read(&fscache_n_stores),
50221- atomic_read(&fscache_n_stores_ok),
50222- atomic_read(&fscache_n_stores_again),
50223- atomic_read(&fscache_n_stores_nobufs),
50224- atomic_read(&fscache_n_stores_oom));
50225+ atomic_read_unchecked(&fscache_n_stores),
50226+ atomic_read_unchecked(&fscache_n_stores_ok),
50227+ atomic_read_unchecked(&fscache_n_stores_again),
50228+ atomic_read_unchecked(&fscache_n_stores_nobufs),
50229+ atomic_read_unchecked(&fscache_n_stores_oom));
50230 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50231- atomic_read(&fscache_n_store_ops),
50232- atomic_read(&fscache_n_store_calls),
50233- atomic_read(&fscache_n_store_pages),
50234- atomic_read(&fscache_n_store_radix_deletes),
50235- atomic_read(&fscache_n_store_pages_over_limit));
50236+ atomic_read_unchecked(&fscache_n_store_ops),
50237+ atomic_read_unchecked(&fscache_n_store_calls),
50238+ atomic_read_unchecked(&fscache_n_store_pages),
50239+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
50240+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50241
50242 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50243- atomic_read(&fscache_n_store_vmscan_not_storing),
50244- atomic_read(&fscache_n_store_vmscan_gone),
50245- atomic_read(&fscache_n_store_vmscan_busy),
50246- atomic_read(&fscache_n_store_vmscan_cancelled));
50247+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50248+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50249+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50250+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50251
50252 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50253- atomic_read(&fscache_n_op_pend),
50254- atomic_read(&fscache_n_op_run),
50255- atomic_read(&fscache_n_op_enqueue),
50256- atomic_read(&fscache_n_op_cancelled),
50257- atomic_read(&fscache_n_op_rejected));
50258+ atomic_read_unchecked(&fscache_n_op_pend),
50259+ atomic_read_unchecked(&fscache_n_op_run),
50260+ atomic_read_unchecked(&fscache_n_op_enqueue),
50261+ atomic_read_unchecked(&fscache_n_op_cancelled),
50262+ atomic_read_unchecked(&fscache_n_op_rejected));
50263 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50264- atomic_read(&fscache_n_op_deferred_release),
50265- atomic_read(&fscache_n_op_release),
50266- atomic_read(&fscache_n_op_gc));
50267+ atomic_read_unchecked(&fscache_n_op_deferred_release),
50268+ atomic_read_unchecked(&fscache_n_op_release),
50269+ atomic_read_unchecked(&fscache_n_op_gc));
50270
50271 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50272 atomic_read(&fscache_n_cop_alloc_object),
50273diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50274index de792dc..448b532 100644
50275--- a/fs/fuse/cuse.c
50276+++ b/fs/fuse/cuse.c
50277@@ -576,10 +576,12 @@ static int __init cuse_init(void)
50278 INIT_LIST_HEAD(&cuse_conntbl[i]);
50279
50280 /* inherit and extend fuse_dev_operations */
50281- cuse_channel_fops = fuse_dev_operations;
50282- cuse_channel_fops.owner = THIS_MODULE;
50283- cuse_channel_fops.open = cuse_channel_open;
50284- cuse_channel_fops.release = cuse_channel_release;
50285+ pax_open_kernel();
50286+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50287+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50288+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
50289+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
50290+ pax_close_kernel();
50291
50292 cuse_class = class_create(THIS_MODULE, "cuse");
50293 if (IS_ERR(cuse_class))
50294diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50295index 1facb39..7f48557 100644
50296--- a/fs/fuse/dev.c
50297+++ b/fs/fuse/dev.c
50298@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50299 {
50300 struct fuse_notify_inval_entry_out outarg;
50301 int err = -EINVAL;
50302- char buf[FUSE_NAME_MAX+1];
50303+ char *buf = NULL;
50304 struct qstr name;
50305
50306 if (size < sizeof(outarg))
50307@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50308 if (outarg.namelen > FUSE_NAME_MAX)
50309 goto err;
50310
50311+ err = -ENOMEM;
50312+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50313+ if (!buf)
50314+ goto err;
50315+
50316 err = -EINVAL;
50317 if (size != sizeof(outarg) + outarg.namelen + 1)
50318 goto err;
50319@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50320
50321 down_read(&fc->killsb);
50322 err = -ENOENT;
50323- if (!fc->sb)
50324- goto err_unlock;
50325-
50326- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50327-
50328-err_unlock:
50329+ if (fc->sb)
50330+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50331 up_read(&fc->killsb);
50332+ kfree(buf);
50333 return err;
50334
50335 err:
50336 fuse_copy_finish(cs);
50337+ kfree(buf);
50338 return err;
50339 }
50340
50341diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50342index 4787ae6..73efff7 100644
50343--- a/fs/fuse/dir.c
50344+++ b/fs/fuse/dir.c
50345@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50346 return link;
50347 }
50348
50349-static void free_link(char *link)
50350+static void free_link(const char *link)
50351 {
50352 if (!IS_ERR(link))
50353 free_page((unsigned long) link);
50354diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50355index 247436c..e650ccb 100644
50356--- a/fs/gfs2/ops_inode.c
50357+++ b/fs/gfs2/ops_inode.c
50358@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50359 unsigned int x;
50360 int error;
50361
50362+ pax_track_stack();
50363+
50364 if (ndentry->d_inode) {
50365 nip = GFS2_I(ndentry->d_inode);
50366 if (ip == nip)
50367diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50368index 4463297..4fed53b 100644
50369--- a/fs/gfs2/sys.c
50370+++ b/fs/gfs2/sys.c
50371@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50372 return a->store ? a->store(sdp, buf, len) : len;
50373 }
50374
50375-static struct sysfs_ops gfs2_attr_ops = {
50376+static const struct sysfs_ops gfs2_attr_ops = {
50377 .show = gfs2_attr_show,
50378 .store = gfs2_attr_store,
50379 };
50380@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50381 return 0;
50382 }
50383
50384-static struct kset_uevent_ops gfs2_uevent_ops = {
50385+static const struct kset_uevent_ops gfs2_uevent_ops = {
50386 .uevent = gfs2_uevent,
50387 };
50388
50389diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
50390index 052f214..2462c5b 100644
50391--- a/fs/hfs/btree.c
50392+++ b/fs/hfs/btree.c
50393@@ -45,11 +45,27 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
50394 case HFS_EXT_CNID:
50395 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
50396 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
50397+
50398+ if (HFS_I(tree->inode)->alloc_blocks >
50399+ HFS_I(tree->inode)->first_blocks) {
50400+ printk(KERN_ERR "hfs: invalid btree extent records\n");
50401+ unlock_new_inode(tree->inode);
50402+ goto free_inode;
50403+ }
50404+
50405 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
50406 break;
50407 case HFS_CAT_CNID:
50408 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
50409 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
50410+
50411+ if (!HFS_I(tree->inode)->first_blocks) {
50412+ printk(KERN_ERR "hfs: invalid btree extent records "
50413+ "(0 size).\n");
50414+ unlock_new_inode(tree->inode);
50415+ goto free_inode;
50416+ }
50417+
50418 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
50419 break;
50420 default:
50421@@ -58,11 +74,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
50422 }
50423 unlock_new_inode(tree->inode);
50424
50425- if (!HFS_I(tree->inode)->first_blocks) {
50426- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
50427- goto free_inode;
50428- }
50429-
50430 mapping = tree->inode->i_mapping;
50431 page = read_mapping_page(mapping, 0, NULL);
50432 if (IS_ERR(page))
50433diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50434index f6874ac..7cd98a8 100644
50435--- a/fs/hfsplus/catalog.c
50436+++ b/fs/hfsplus/catalog.c
50437@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50438 int err;
50439 u16 type;
50440
50441+ pax_track_stack();
50442+
50443 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50444 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50445 if (err)
50446@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
50447 int entry_size;
50448 int err;
50449
50450+ pax_track_stack();
50451+
50452 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
50453 sb = dir->i_sb;
50454 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
50455@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
50456 int entry_size, type;
50457 int err = 0;
50458
50459+ pax_track_stack();
50460+
50461 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
50462 dst_dir->i_ino, dst_name->name);
50463 sb = src_dir->i_sb;
50464diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
50465index 5f40236..dac3421 100644
50466--- a/fs/hfsplus/dir.c
50467+++ b/fs/hfsplus/dir.c
50468@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
50469 struct hfsplus_readdir_data *rd;
50470 u16 type;
50471
50472+ pax_track_stack();
50473+
50474 if (filp->f_pos >= inode->i_size)
50475 return 0;
50476
50477diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
50478index 1bcf597..905a251 100644
50479--- a/fs/hfsplus/inode.c
50480+++ b/fs/hfsplus/inode.c
50481@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
50482 int res = 0;
50483 u16 type;
50484
50485+ pax_track_stack();
50486+
50487 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
50488
50489 HFSPLUS_I(inode).dev = 0;
50490@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
50491 struct hfs_find_data fd;
50492 hfsplus_cat_entry entry;
50493
50494+ pax_track_stack();
50495+
50496 if (HFSPLUS_IS_RSRC(inode))
50497 main_inode = HFSPLUS_I(inode).rsrc_inode;
50498
50499diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
50500index f457d2c..7ef4ad5 100644
50501--- a/fs/hfsplus/ioctl.c
50502+++ b/fs/hfsplus/ioctl.c
50503@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
50504 struct hfsplus_cat_file *file;
50505 int res;
50506
50507+ pax_track_stack();
50508+
50509 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50510 return -EOPNOTSUPP;
50511
50512@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
50513 struct hfsplus_cat_file *file;
50514 ssize_t res = 0;
50515
50516+ pax_track_stack();
50517+
50518 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50519 return -EOPNOTSUPP;
50520
50521diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
50522index 43022f3..7298079 100644
50523--- a/fs/hfsplus/super.c
50524+++ b/fs/hfsplus/super.c
50525@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
50526 struct nls_table *nls = NULL;
50527 int err = -EINVAL;
50528
50529+ pax_track_stack();
50530+
50531 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
50532 if (!sbi)
50533 return -ENOMEM;
50534diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
50535index 87a1258..5694d91 100644
50536--- a/fs/hugetlbfs/inode.c
50537+++ b/fs/hugetlbfs/inode.c
50538@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
50539 .kill_sb = kill_litter_super,
50540 };
50541
50542-static struct vfsmount *hugetlbfs_vfsmount;
50543+struct vfsmount *hugetlbfs_vfsmount;
50544
50545 static int can_do_hugetlb_shm(void)
50546 {
50547diff --git a/fs/ioctl.c b/fs/ioctl.c
50548index 6c75110..19d2c3c 100644
50549--- a/fs/ioctl.c
50550+++ b/fs/ioctl.c
50551@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
50552 u64 phys, u64 len, u32 flags)
50553 {
50554 struct fiemap_extent extent;
50555- struct fiemap_extent *dest = fieinfo->fi_extents_start;
50556+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
50557
50558 /* only count the extents */
50559 if (fieinfo->fi_extents_max == 0) {
50560@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50561
50562 fieinfo.fi_flags = fiemap.fm_flags;
50563 fieinfo.fi_extents_max = fiemap.fm_extent_count;
50564- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
50565+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
50566
50567 if (fiemap.fm_extent_count != 0 &&
50568 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
50569@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50570 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
50571 fiemap.fm_flags = fieinfo.fi_flags;
50572 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
50573- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
50574+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
50575 error = -EFAULT;
50576
50577 return error;
50578diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
50579index b0435dd..81ee0be 100644
50580--- a/fs/jbd/checkpoint.c
50581+++ b/fs/jbd/checkpoint.c
50582@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
50583 tid_t this_tid;
50584 int result;
50585
50586+ pax_track_stack();
50587+
50588 jbd_debug(1, "Start checkpoint\n");
50589
50590 /*
50591diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
50592index 546d153..736896c 100644
50593--- a/fs/jffs2/compr_rtime.c
50594+++ b/fs/jffs2/compr_rtime.c
50595@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
50596 int outpos = 0;
50597 int pos=0;
50598
50599+ pax_track_stack();
50600+
50601 memset(positions,0,sizeof(positions));
50602
50603 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
50604@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
50605 int outpos = 0;
50606 int pos=0;
50607
50608+ pax_track_stack();
50609+
50610 memset(positions,0,sizeof(positions));
50611
50612 while (outpos<destlen) {
50613diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
50614index 170d289..3254b98 100644
50615--- a/fs/jffs2/compr_rubin.c
50616+++ b/fs/jffs2/compr_rubin.c
50617@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
50618 int ret;
50619 uint32_t mysrclen, mydstlen;
50620
50621+ pax_track_stack();
50622+
50623 mysrclen = *sourcelen;
50624 mydstlen = *dstlen - 8;
50625
50626diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
50627index b47679b..00d65d3 100644
50628--- a/fs/jffs2/erase.c
50629+++ b/fs/jffs2/erase.c
50630@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
50631 struct jffs2_unknown_node marker = {
50632 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
50633 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50634- .totlen = cpu_to_je32(c->cleanmarker_size)
50635+ .totlen = cpu_to_je32(c->cleanmarker_size),
50636+ .hdr_crc = cpu_to_je32(0)
50637 };
50638
50639 jffs2_prealloc_raw_node_refs(c, jeb, 1);
50640diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
50641index 5ef7bac..4fd1e3c 100644
50642--- a/fs/jffs2/wbuf.c
50643+++ b/fs/jffs2/wbuf.c
50644@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
50645 {
50646 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
50647 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50648- .totlen = constant_cpu_to_je32(8)
50649+ .totlen = constant_cpu_to_je32(8),
50650+ .hdr_crc = constant_cpu_to_je32(0)
50651 };
50652
50653 /*
50654diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
50655index 082e844..52012a1 100644
50656--- a/fs/jffs2/xattr.c
50657+++ b/fs/jffs2/xattr.c
50658@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
50659
50660 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
50661
50662+ pax_track_stack();
50663+
50664 /* Phase.1 : Merge same xref */
50665 for (i=0; i < XREF_TMPHASH_SIZE; i++)
50666 xref_tmphash[i] = NULL;
50667diff --git a/fs/jfs/super.c b/fs/jfs/super.c
50668index 2234c73..f6e6e6b 100644
50669--- a/fs/jfs/super.c
50670+++ b/fs/jfs/super.c
50671@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
50672
50673 jfs_inode_cachep =
50674 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
50675- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
50676+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
50677 init_once);
50678 if (jfs_inode_cachep == NULL)
50679 return -ENOMEM;
50680diff --git a/fs/libfs.c b/fs/libfs.c
50681index ba36e93..3153fce 100644
50682--- a/fs/libfs.c
50683+++ b/fs/libfs.c
50684@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
50685
50686 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
50687 struct dentry *next;
50688+ char d_name[sizeof(next->d_iname)];
50689+ const unsigned char *name;
50690+
50691 next = list_entry(p, struct dentry, d_u.d_child);
50692 if (d_unhashed(next) || !next->d_inode)
50693 continue;
50694
50695 spin_unlock(&dcache_lock);
50696- if (filldir(dirent, next->d_name.name,
50697+ name = next->d_name.name;
50698+ if (name == next->d_iname) {
50699+ memcpy(d_name, name, next->d_name.len);
50700+ name = d_name;
50701+ }
50702+ if (filldir(dirent, name,
50703 next->d_name.len, filp->f_pos,
50704 next->d_inode->i_ino,
50705 dt_type(next->d_inode)) < 0)
50706diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
50707index c325a83..d15b07b 100644
50708--- a/fs/lockd/clntproc.c
50709+++ b/fs/lockd/clntproc.c
50710@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
50711 /*
50712 * Cookie counter for NLM requests
50713 */
50714-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
50715+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
50716
50717 void nlmclnt_next_cookie(struct nlm_cookie *c)
50718 {
50719- u32 cookie = atomic_inc_return(&nlm_cookie);
50720+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
50721
50722 memcpy(c->data, &cookie, 4);
50723 c->len=4;
50724@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
50725 struct nlm_rqst reqst, *req;
50726 int status;
50727
50728+ pax_track_stack();
50729+
50730 req = &reqst;
50731 memset(req, 0, sizeof(*req));
50732 locks_init_lock(&req->a_args.lock.fl);
50733diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
50734index 1a54ae1..6a16c27 100644
50735--- a/fs/lockd/svc.c
50736+++ b/fs/lockd/svc.c
50737@@ -43,7 +43,7 @@
50738
50739 static struct svc_program nlmsvc_program;
50740
50741-struct nlmsvc_binding * nlmsvc_ops;
50742+const struct nlmsvc_binding * nlmsvc_ops;
50743 EXPORT_SYMBOL_GPL(nlmsvc_ops);
50744
50745 static DEFINE_MUTEX(nlmsvc_mutex);
50746diff --git a/fs/locks.c b/fs/locks.c
50747index a8794f2..4041e55 100644
50748--- a/fs/locks.c
50749+++ b/fs/locks.c
50750@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
50751
50752 static struct kmem_cache *filelock_cache __read_mostly;
50753
50754+static void locks_init_lock_always(struct file_lock *fl)
50755+{
50756+ fl->fl_next = NULL;
50757+ fl->fl_fasync = NULL;
50758+ fl->fl_owner = NULL;
50759+ fl->fl_pid = 0;
50760+ fl->fl_nspid = NULL;
50761+ fl->fl_file = NULL;
50762+ fl->fl_flags = 0;
50763+ fl->fl_type = 0;
50764+ fl->fl_start = fl->fl_end = 0;
50765+}
50766+
50767 /* Allocate an empty lock structure. */
50768 static struct file_lock *locks_alloc_lock(void)
50769 {
50770- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50771+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50772+
50773+ if (fl)
50774+ locks_init_lock_always(fl);
50775+
50776+ return fl;
50777 }
50778
50779 void locks_release_private(struct file_lock *fl)
50780@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
50781 INIT_LIST_HEAD(&fl->fl_link);
50782 INIT_LIST_HEAD(&fl->fl_block);
50783 init_waitqueue_head(&fl->fl_wait);
50784- fl->fl_next = NULL;
50785- fl->fl_fasync = NULL;
50786- fl->fl_owner = NULL;
50787- fl->fl_pid = 0;
50788- fl->fl_nspid = NULL;
50789- fl->fl_file = NULL;
50790- fl->fl_flags = 0;
50791- fl->fl_type = 0;
50792- fl->fl_start = fl->fl_end = 0;
50793 fl->fl_ops = NULL;
50794 fl->fl_lmops = NULL;
50795+ locks_init_lock_always(fl);
50796 }
50797
50798 EXPORT_SYMBOL(locks_init_lock);
50799@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
50800 return;
50801
50802 if (filp->f_op && filp->f_op->flock) {
50803- struct file_lock fl = {
50804+ struct file_lock flock = {
50805 .fl_pid = current->tgid,
50806 .fl_file = filp,
50807 .fl_flags = FL_FLOCK,
50808 .fl_type = F_UNLCK,
50809 .fl_end = OFFSET_MAX,
50810 };
50811- filp->f_op->flock(filp, F_SETLKW, &fl);
50812- if (fl.fl_ops && fl.fl_ops->fl_release_private)
50813- fl.fl_ops->fl_release_private(&fl);
50814+ filp->f_op->flock(filp, F_SETLKW, &flock);
50815+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
50816+ flock.fl_ops->fl_release_private(&flock);
50817 }
50818
50819 lock_kernel();
50820diff --git a/fs/mbcache.c b/fs/mbcache.c
50821index ec88ff3..b843a82 100644
50822--- a/fs/mbcache.c
50823+++ b/fs/mbcache.c
50824@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
50825 if (!cache)
50826 goto fail;
50827 cache->c_name = name;
50828- cache->c_op.free = NULL;
50829+ *(void **)&cache->c_op.free = NULL;
50830 if (cache_op)
50831- cache->c_op.free = cache_op->free;
50832+ *(void **)&cache->c_op.free = cache_op->free;
50833 atomic_set(&cache->c_entry_count, 0);
50834 cache->c_bucket_bits = bucket_bits;
50835 #ifdef MB_CACHE_INDEXES_COUNT
50836diff --git a/fs/namei.c b/fs/namei.c
50837index b0afbd4..8d065a1 100644
50838--- a/fs/namei.c
50839+++ b/fs/namei.c
50840@@ -224,14 +224,6 @@ int generic_permission(struct inode *inode, int mask,
50841 return ret;
50842
50843 /*
50844- * Read/write DACs are always overridable.
50845- * Executable DACs are overridable if at least one exec bit is set.
50846- */
50847- if (!(mask & MAY_EXEC) || execute_ok(inode))
50848- if (capable(CAP_DAC_OVERRIDE))
50849- return 0;
50850-
50851- /*
50852 * Searching includes executable on directories, else just read.
50853 */
50854 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
50855@@ -239,6 +231,14 @@ int generic_permission(struct inode *inode, int mask,
50856 if (capable(CAP_DAC_READ_SEARCH))
50857 return 0;
50858
50859+ /*
50860+ * Read/write DACs are always overridable.
50861+ * Executable DACs are overridable if at least one exec bit is set.
50862+ */
50863+ if (!(mask & MAY_EXEC) || execute_ok(inode))
50864+ if (capable(CAP_DAC_OVERRIDE))
50865+ return 0;
50866+
50867 return -EACCES;
50868 }
50869
50870@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
50871 if (!ret)
50872 goto ok;
50873
50874- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
50875+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
50876+ capable(CAP_DAC_OVERRIDE))
50877 goto ok;
50878
50879 return ret;
50880@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
50881 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
50882 error = PTR_ERR(cookie);
50883 if (!IS_ERR(cookie)) {
50884- char *s = nd_get_link(nd);
50885+ const char *s = nd_get_link(nd);
50886 error = 0;
50887 if (s)
50888 error = __vfs_follow_link(nd, s);
50889@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
50890 err = security_inode_follow_link(path->dentry, nd);
50891 if (err)
50892 goto loop;
50893+
50894+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
50895+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
50896+ err = -EACCES;
50897+ goto loop;
50898+ }
50899+
50900 current->link_count++;
50901 current->total_link_count++;
50902 nd->depth++;
50903@@ -1016,11 +1024,19 @@ return_reval:
50904 break;
50905 }
50906 return_base:
50907+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
50908+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
50909+ path_put(&nd->path);
50910+ return -ENOENT;
50911+ }
50912 return 0;
50913 out_dput:
50914 path_put_conditional(&next, nd);
50915 break;
50916 }
50917+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
50918+ err = -ENOENT;
50919+
50920 path_put(&nd->path);
50921 return_err:
50922 return err;
50923@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
50924 int retval = path_init(dfd, name, flags, nd);
50925 if (!retval)
50926 retval = path_walk(name, nd);
50927- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
50928- nd->path.dentry->d_inode))
50929- audit_inode(name, nd->path.dentry);
50930+
50931+ if (likely(!retval)) {
50932+ if (nd->path.dentry && nd->path.dentry->d_inode) {
50933+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
50934+ retval = -ENOENT;
50935+ if (!audit_dummy_context())
50936+ audit_inode(name, nd->path.dentry);
50937+ }
50938+ }
50939 if (nd->root.mnt) {
50940 path_put(&nd->root);
50941 nd->root.mnt = NULL;
50942 }
50943+
50944 return retval;
50945 }
50946
50947@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
50948 if (error)
50949 goto err_out;
50950
50951+
50952+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
50953+ error = -EPERM;
50954+ goto err_out;
50955+ }
50956+ if (gr_handle_rawio(inode)) {
50957+ error = -EPERM;
50958+ goto err_out;
50959+ }
50960+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
50961+ error = -EACCES;
50962+ goto err_out;
50963+ }
50964+
50965 if (flag & O_TRUNC) {
50966 error = get_write_access(inode);
50967 if (error)
50968@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
50969 {
50970 int error;
50971 struct dentry *dir = nd->path.dentry;
50972+ int acc_mode = ACC_MODE(flag);
50973+
50974+ if (flag & O_TRUNC)
50975+ acc_mode |= MAY_WRITE;
50976+ if (flag & O_APPEND)
50977+ acc_mode |= MAY_APPEND;
50978+
50979+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
50980+ error = -EACCES;
50981+ goto out_unlock;
50982+ }
50983
50984 if (!IS_POSIXACL(dir->d_inode))
50985 mode &= ~current_umask();
50986@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
50987 if (error)
50988 goto out_unlock;
50989 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
50990+ if (!error)
50991+ gr_handle_create(path->dentry, nd->path.mnt);
50992 out_unlock:
50993 mutex_unlock(&dir->d_inode->i_mutex);
50994 dput(nd->path.dentry);
50995@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
50996 &nd, flag);
50997 if (error)
50998 return ERR_PTR(error);
50999+
51000+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51001+ error = -EPERM;
51002+ goto exit;
51003+ }
51004+
51005+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51006+ error = -EPERM;
51007+ goto exit;
51008+ }
51009+
51010+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51011+ error = -EACCES;
51012+ goto exit;
51013+ }
51014+
51015 goto ok;
51016 }
51017
51018@@ -1795,6 +1861,19 @@ do_last:
51019 /*
51020 * It already exists.
51021 */
51022+
51023+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51024+ error = -ENOENT;
51025+ goto exit_mutex_unlock;
51026+ }
51027+
51028+ /* only check if O_CREAT is specified, all other checks need
51029+ to go into may_open */
51030+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51031+ error = -EACCES;
51032+ goto exit_mutex_unlock;
51033+ }
51034+
51035 mutex_unlock(&dir->d_inode->i_mutex);
51036 audit_inode(pathname, path.dentry);
51037
51038@@ -1887,6 +1966,13 @@ do_link:
51039 error = security_inode_follow_link(path.dentry, &nd);
51040 if (error)
51041 goto exit_dput;
51042+
51043+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51044+ path.dentry, nd.path.mnt)) {
51045+ error = -EACCES;
51046+ goto exit_dput;
51047+ }
51048+
51049 error = __do_follow_link(&path, &nd);
51050 if (error) {
51051 /* Does someone understand code flow here? Or it is only
51052@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51053 }
51054 return dentry;
51055 eexist:
51056+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51057+ dput(dentry);
51058+ return ERR_PTR(-ENOENT);
51059+ }
51060 dput(dentry);
51061 dentry = ERR_PTR(-EEXIST);
51062 fail:
51063@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51064 error = may_mknod(mode);
51065 if (error)
51066 goto out_dput;
51067+
51068+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51069+ error = -EPERM;
51070+ goto out_dput;
51071+ }
51072+
51073+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51074+ error = -EACCES;
51075+ goto out_dput;
51076+ }
51077+
51078 error = mnt_want_write(nd.path.mnt);
51079 if (error)
51080 goto out_dput;
51081@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51082 }
51083 out_drop_write:
51084 mnt_drop_write(nd.path.mnt);
51085+
51086+ if (!error)
51087+ gr_handle_create(dentry, nd.path.mnt);
51088 out_dput:
51089 dput(dentry);
51090 out_unlock:
51091@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51092 if (IS_ERR(dentry))
51093 goto out_unlock;
51094
51095+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51096+ error = -EACCES;
51097+ goto out_dput;
51098+ }
51099+
51100 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51101 mode &= ~current_umask();
51102 error = mnt_want_write(nd.path.mnt);
51103@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51104 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51105 out_drop_write:
51106 mnt_drop_write(nd.path.mnt);
51107+
51108+ if (!error)
51109+ gr_handle_create(dentry, nd.path.mnt);
51110+
51111 out_dput:
51112 dput(dentry);
51113 out_unlock:
51114@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51115 char * name;
51116 struct dentry *dentry;
51117 struct nameidata nd;
51118+ ino_t saved_ino = 0;
51119+ dev_t saved_dev = 0;
51120
51121 error = user_path_parent(dfd, pathname, &nd, &name);
51122 if (error)
51123@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51124 error = PTR_ERR(dentry);
51125 if (IS_ERR(dentry))
51126 goto exit2;
51127+
51128+ if (dentry->d_inode != NULL) {
51129+ saved_ino = dentry->d_inode->i_ino;
51130+ saved_dev = gr_get_dev_from_dentry(dentry);
51131+
51132+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51133+ error = -EACCES;
51134+ goto exit3;
51135+ }
51136+ }
51137+
51138 error = mnt_want_write(nd.path.mnt);
51139 if (error)
51140 goto exit3;
51141@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51142 if (error)
51143 goto exit4;
51144 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51145+ if (!error && (saved_dev || saved_ino))
51146+ gr_handle_delete(saved_ino, saved_dev);
51147 exit4:
51148 mnt_drop_write(nd.path.mnt);
51149 exit3:
51150@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51151 struct dentry *dentry;
51152 struct nameidata nd;
51153 struct inode *inode = NULL;
51154+ ino_t saved_ino = 0;
51155+ dev_t saved_dev = 0;
51156
51157 error = user_path_parent(dfd, pathname, &nd, &name);
51158 if (error)
51159@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51160 if (nd.last.name[nd.last.len])
51161 goto slashes;
51162 inode = dentry->d_inode;
51163- if (inode)
51164+ if (inode) {
51165+ if (inode->i_nlink <= 1) {
51166+ saved_ino = inode->i_ino;
51167+ saved_dev = gr_get_dev_from_dentry(dentry);
51168+ }
51169+
51170 atomic_inc(&inode->i_count);
51171+
51172+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51173+ error = -EACCES;
51174+ goto exit2;
51175+ }
51176+ }
51177 error = mnt_want_write(nd.path.mnt);
51178 if (error)
51179 goto exit2;
51180@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51181 if (error)
51182 goto exit3;
51183 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51184+ if (!error && (saved_ino || saved_dev))
51185+ gr_handle_delete(saved_ino, saved_dev);
51186 exit3:
51187 mnt_drop_write(nd.path.mnt);
51188 exit2:
51189@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51190 if (IS_ERR(dentry))
51191 goto out_unlock;
51192
51193+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51194+ error = -EACCES;
51195+ goto out_dput;
51196+ }
51197+
51198 error = mnt_want_write(nd.path.mnt);
51199 if (error)
51200 goto out_dput;
51201@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51202 if (error)
51203 goto out_drop_write;
51204 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51205+ if (!error)
51206+ gr_handle_create(dentry, nd.path.mnt);
51207 out_drop_write:
51208 mnt_drop_write(nd.path.mnt);
51209 out_dput:
51210@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51211 error = PTR_ERR(new_dentry);
51212 if (IS_ERR(new_dentry))
51213 goto out_unlock;
51214+
51215+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51216+ old_path.dentry->d_inode,
51217+ old_path.dentry->d_inode->i_mode, to)) {
51218+ error = -EACCES;
51219+ goto out_dput;
51220+ }
51221+
51222+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51223+ old_path.dentry, old_path.mnt, to)) {
51224+ error = -EACCES;
51225+ goto out_dput;
51226+ }
51227+
51228 error = mnt_want_write(nd.path.mnt);
51229 if (error)
51230 goto out_dput;
51231@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51232 if (error)
51233 goto out_drop_write;
51234 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51235+ if (!error)
51236+ gr_handle_create(new_dentry, nd.path.mnt);
51237 out_drop_write:
51238 mnt_drop_write(nd.path.mnt);
51239 out_dput:
51240@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51241 char *to;
51242 int error;
51243
51244+ pax_track_stack();
51245+
51246 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51247 if (error)
51248 goto exit;
51249@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51250 if (new_dentry == trap)
51251 goto exit5;
51252
51253+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51254+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
51255+ to);
51256+ if (error)
51257+ goto exit5;
51258+
51259 error = mnt_want_write(oldnd.path.mnt);
51260 if (error)
51261 goto exit5;
51262@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51263 goto exit6;
51264 error = vfs_rename(old_dir->d_inode, old_dentry,
51265 new_dir->d_inode, new_dentry);
51266+ if (!error)
51267+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51268+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51269 exit6:
51270 mnt_drop_write(oldnd.path.mnt);
51271 exit5:
51272@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51273
51274 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51275 {
51276+ char tmpbuf[64];
51277+ const char *newlink;
51278 int len;
51279
51280 len = PTR_ERR(link);
51281@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51282 len = strlen(link);
51283 if (len > (unsigned) buflen)
51284 len = buflen;
51285- if (copy_to_user(buffer, link, len))
51286+
51287+ if (len < sizeof(tmpbuf)) {
51288+ memcpy(tmpbuf, link, len);
51289+ newlink = tmpbuf;
51290+ } else
51291+ newlink = link;
51292+
51293+ if (copy_to_user(buffer, newlink, len))
51294 len = -EFAULT;
51295 out:
51296 return len;
51297diff --git a/fs/namespace.c b/fs/namespace.c
51298index 2beb0fb..11a95a5 100644
51299--- a/fs/namespace.c
51300+++ b/fs/namespace.c
51301@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51302 if (!(sb->s_flags & MS_RDONLY))
51303 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51304 up_write(&sb->s_umount);
51305+
51306+ gr_log_remount(mnt->mnt_devname, retval);
51307+
51308 return retval;
51309 }
51310
51311@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51312 security_sb_umount_busy(mnt);
51313 up_write(&namespace_sem);
51314 release_mounts(&umount_list);
51315+
51316+ gr_log_unmount(mnt->mnt_devname, retval);
51317+
51318 return retval;
51319 }
51320
51321@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51322 if (retval)
51323 goto dput_out;
51324
51325+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51326+ retval = -EPERM;
51327+ goto dput_out;
51328+ }
51329+
51330+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51331+ retval = -EPERM;
51332+ goto dput_out;
51333+ }
51334+
51335 if (flags & MS_REMOUNT)
51336 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51337 data_page);
51338@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51339 dev_name, data_page);
51340 dput_out:
51341 path_put(&path);
51342+
51343+ gr_log_mount(dev_name, dir_name, retval);
51344+
51345 return retval;
51346 }
51347
51348@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51349 goto out1;
51350 }
51351
51352+ if (gr_handle_chroot_pivot()) {
51353+ error = -EPERM;
51354+ path_put(&old);
51355+ goto out1;
51356+ }
51357+
51358 read_lock(&current->fs->lock);
51359 root = current->fs->root;
51360 path_get(&current->fs->root);
51361diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51362index b8b5b30..2bd9ccb 100644
51363--- a/fs/ncpfs/dir.c
51364+++ b/fs/ncpfs/dir.c
51365@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51366 int res, val = 0, len;
51367 __u8 __name[NCP_MAXPATHLEN + 1];
51368
51369+ pax_track_stack();
51370+
51371 parent = dget_parent(dentry);
51372 dir = parent->d_inode;
51373
51374@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51375 int error, res, len;
51376 __u8 __name[NCP_MAXPATHLEN + 1];
51377
51378+ pax_track_stack();
51379+
51380 lock_kernel();
51381 error = -EIO;
51382 if (!ncp_conn_valid(server))
51383@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51384 int error, result, len;
51385 int opmode;
51386 __u8 __name[NCP_MAXPATHLEN + 1];
51387-
51388+
51389 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51390 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51391
51392+ pax_track_stack();
51393+
51394 error = -EIO;
51395 lock_kernel();
51396 if (!ncp_conn_valid(server))
51397@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51398 int error, len;
51399 __u8 __name[NCP_MAXPATHLEN + 1];
51400
51401+ pax_track_stack();
51402+
51403 DPRINTK("ncp_mkdir: making %s/%s\n",
51404 dentry->d_parent->d_name.name, dentry->d_name.name);
51405
51406@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51407 if (!ncp_conn_valid(server))
51408 goto out;
51409
51410+ pax_track_stack();
51411+
51412 ncp_age_dentry(server, dentry);
51413 len = sizeof(__name);
51414 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51415@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51416 int old_len, new_len;
51417 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51418
51419+ pax_track_stack();
51420+
51421 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51422 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51423 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51424diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51425index cf98da1..da890a9 100644
51426--- a/fs/ncpfs/inode.c
51427+++ b/fs/ncpfs/inode.c
51428@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51429 #endif
51430 struct ncp_entry_info finfo;
51431
51432+ pax_track_stack();
51433+
51434 data.wdog_pid = NULL;
51435 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51436 if (!server)
51437diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51438index bfaef7b..e9d03ca 100644
51439--- a/fs/nfs/inode.c
51440+++ b/fs/nfs/inode.c
51441@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51442 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51443 nfsi->attrtimeo_timestamp = jiffies;
51444
51445- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51446+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
51447 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
51448 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
51449 else
51450@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
51451 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
51452 }
51453
51454-static atomic_long_t nfs_attr_generation_counter;
51455+static atomic_long_unchecked_t nfs_attr_generation_counter;
51456
51457 static unsigned long nfs_read_attr_generation_counter(void)
51458 {
51459- return atomic_long_read(&nfs_attr_generation_counter);
51460+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
51461 }
51462
51463 unsigned long nfs_inc_attr_generation_counter(void)
51464 {
51465- return atomic_long_inc_return(&nfs_attr_generation_counter);
51466+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
51467 }
51468
51469 void nfs_fattr_init(struct nfs_fattr *fattr)
51470diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
51471index cc2f505..f6a236f 100644
51472--- a/fs/nfsd/lockd.c
51473+++ b/fs/nfsd/lockd.c
51474@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
51475 fput(filp);
51476 }
51477
51478-static struct nlmsvc_binding nfsd_nlm_ops = {
51479+static const struct nlmsvc_binding nfsd_nlm_ops = {
51480 .fopen = nlm_fopen, /* open file for locking */
51481 .fclose = nlm_fclose, /* close file */
51482 };
51483diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
51484index cfc3391..dcc083a 100644
51485--- a/fs/nfsd/nfs4state.c
51486+++ b/fs/nfsd/nfs4state.c
51487@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
51488 unsigned int cmd;
51489 int err;
51490
51491+ pax_track_stack();
51492+
51493 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
51494 (long long) lock->lk_offset,
51495 (long long) lock->lk_length);
51496diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
51497index 4a82a96..0d5fb49 100644
51498--- a/fs/nfsd/nfs4xdr.c
51499+++ b/fs/nfsd/nfs4xdr.c
51500@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
51501 struct nfsd4_compoundres *resp = rqstp->rq_resp;
51502 u32 minorversion = resp->cstate.minorversion;
51503
51504+ pax_track_stack();
51505+
51506 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
51507 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
51508 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
51509diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
51510index 2e09588..596421d 100644
51511--- a/fs/nfsd/vfs.c
51512+++ b/fs/nfsd/vfs.c
51513@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51514 } else {
51515 oldfs = get_fs();
51516 set_fs(KERNEL_DS);
51517- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
51518+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
51519 set_fs(oldfs);
51520 }
51521
51522@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51523
51524 /* Write the data. */
51525 oldfs = get_fs(); set_fs(KERNEL_DS);
51526- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
51527+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
51528 set_fs(oldfs);
51529 if (host_err < 0)
51530 goto out_nfserr;
51531@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
51532 */
51533
51534 oldfs = get_fs(); set_fs(KERNEL_DS);
51535- host_err = inode->i_op->readlink(dentry, buf, *lenp);
51536+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
51537 set_fs(oldfs);
51538
51539 if (host_err < 0)
51540diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
51541index f6af760..d6b2b83 100644
51542--- a/fs/nilfs2/ioctl.c
51543+++ b/fs/nilfs2/ioctl.c
51544@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
51545 unsigned int cmd, void __user *argp)
51546 {
51547 struct nilfs_argv argv[5];
51548- const static size_t argsz[5] = {
51549+ static const size_t argsz[5] = {
51550 sizeof(struct nilfs_vdesc),
51551 sizeof(struct nilfs_period),
51552 sizeof(__u64),
51553diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
51554index 7e54e52..9337248 100644
51555--- a/fs/notify/dnotify/dnotify.c
51556+++ b/fs/notify/dnotify/dnotify.c
51557@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
51558 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
51559 }
51560
51561-static struct fsnotify_ops dnotify_fsnotify_ops = {
51562+static const struct fsnotify_ops dnotify_fsnotify_ops = {
51563 .handle_event = dnotify_handle_event,
51564 .should_send_event = dnotify_should_send_event,
51565 .free_group_priv = NULL,
51566diff --git a/fs/notify/notification.c b/fs/notify/notification.c
51567index b8bf53b..c518688 100644
51568--- a/fs/notify/notification.c
51569+++ b/fs/notify/notification.c
51570@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
51571 * get set to 0 so it will never get 'freed'
51572 */
51573 static struct fsnotify_event q_overflow_event;
51574-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51575+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51576
51577 /**
51578 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
51579@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51580 */
51581 u32 fsnotify_get_cookie(void)
51582 {
51583- return atomic_inc_return(&fsnotify_sync_cookie);
51584+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
51585 }
51586 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
51587
51588diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
51589index 5a9e344..0f8cd28 100644
51590--- a/fs/ntfs/dir.c
51591+++ b/fs/ntfs/dir.c
51592@@ -1328,7 +1328,7 @@ find_next_index_buffer:
51593 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
51594 ~(s64)(ndir->itype.index.block_size - 1)));
51595 /* Bounds checks. */
51596- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51597+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51598 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
51599 "inode 0x%lx or driver bug.", vdir->i_ino);
51600 goto err_out;
51601diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
51602index 663c0e3..b6868e9 100644
51603--- a/fs/ntfs/file.c
51604+++ b/fs/ntfs/file.c
51605@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
51606 #endif /* NTFS_RW */
51607 };
51608
51609-const struct file_operations ntfs_empty_file_ops = {};
51610+const struct file_operations ntfs_empty_file_ops __read_only;
51611
51612-const struct inode_operations ntfs_empty_inode_ops = {};
51613+const struct inode_operations ntfs_empty_inode_ops __read_only;
51614diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
51615index 1cd2934..880b5d2 100644
51616--- a/fs/ocfs2/cluster/masklog.c
51617+++ b/fs/ocfs2/cluster/masklog.c
51618@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
51619 return mlog_mask_store(mlog_attr->mask, buf, count);
51620 }
51621
51622-static struct sysfs_ops mlog_attr_ops = {
51623+static const struct sysfs_ops mlog_attr_ops = {
51624 .show = mlog_show,
51625 .store = mlog_store,
51626 };
51627diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
51628index ac10f83..2cd2607 100644
51629--- a/fs/ocfs2/localalloc.c
51630+++ b/fs/ocfs2/localalloc.c
51631@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
51632 goto bail;
51633 }
51634
51635- atomic_inc(&osb->alloc_stats.moves);
51636+ atomic_inc_unchecked(&osb->alloc_stats.moves);
51637
51638 status = 0;
51639 bail:
51640diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
51641index f010b22..9f9ed34 100644
51642--- a/fs/ocfs2/namei.c
51643+++ b/fs/ocfs2/namei.c
51644@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
51645 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
51646 struct ocfs2_dir_lookup_result target_insert = { NULL, };
51647
51648+ pax_track_stack();
51649+
51650 /* At some point it might be nice to break this function up a
51651 * bit. */
51652
51653diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
51654index d963d86..914cfbd 100644
51655--- a/fs/ocfs2/ocfs2.h
51656+++ b/fs/ocfs2/ocfs2.h
51657@@ -217,11 +217,11 @@ enum ocfs2_vol_state
51658
51659 struct ocfs2_alloc_stats
51660 {
51661- atomic_t moves;
51662- atomic_t local_data;
51663- atomic_t bitmap_data;
51664- atomic_t bg_allocs;
51665- atomic_t bg_extends;
51666+ atomic_unchecked_t moves;
51667+ atomic_unchecked_t local_data;
51668+ atomic_unchecked_t bitmap_data;
51669+ atomic_unchecked_t bg_allocs;
51670+ atomic_unchecked_t bg_extends;
51671 };
51672
51673 enum ocfs2_local_alloc_state
51674diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
51675index 79b5dac..d322952 100644
51676--- a/fs/ocfs2/suballoc.c
51677+++ b/fs/ocfs2/suballoc.c
51678@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
51679 mlog_errno(status);
51680 goto bail;
51681 }
51682- atomic_inc(&osb->alloc_stats.bg_extends);
51683+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
51684
51685 /* You should never ask for this much metadata */
51686 BUG_ON(bits_wanted >
51687@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
51688 mlog_errno(status);
51689 goto bail;
51690 }
51691- atomic_inc(&osb->alloc_stats.bg_allocs);
51692+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51693
51694 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
51695 ac->ac_bits_given += (*num_bits);
51696@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
51697 mlog_errno(status);
51698 goto bail;
51699 }
51700- atomic_inc(&osb->alloc_stats.bg_allocs);
51701+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51702
51703 BUG_ON(num_bits != 1);
51704
51705@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51706 cluster_start,
51707 num_clusters);
51708 if (!status)
51709- atomic_inc(&osb->alloc_stats.local_data);
51710+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
51711 } else {
51712 if (min_clusters > (osb->bitmap_cpg - 1)) {
51713 /* The only paths asking for contiguousness
51714@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51715 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
51716 bg_blkno,
51717 bg_bit_off);
51718- atomic_inc(&osb->alloc_stats.bitmap_data);
51719+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
51720 }
51721 }
51722 if (status < 0) {
51723diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
51724index 9f55be4..a3f8048 100644
51725--- a/fs/ocfs2/super.c
51726+++ b/fs/ocfs2/super.c
51727@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
51728 "%10s => GlobalAllocs: %d LocalAllocs: %d "
51729 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
51730 "Stats",
51731- atomic_read(&osb->alloc_stats.bitmap_data),
51732- atomic_read(&osb->alloc_stats.local_data),
51733- atomic_read(&osb->alloc_stats.bg_allocs),
51734- atomic_read(&osb->alloc_stats.moves),
51735- atomic_read(&osb->alloc_stats.bg_extends));
51736+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
51737+ atomic_read_unchecked(&osb->alloc_stats.local_data),
51738+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
51739+ atomic_read_unchecked(&osb->alloc_stats.moves),
51740+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
51741
51742 out += snprintf(buf + out, len - out,
51743 "%10s => State: %u Descriptor: %llu Size: %u bits "
51744@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
51745 spin_lock_init(&osb->osb_xattr_lock);
51746 ocfs2_init_inode_steal_slot(osb);
51747
51748- atomic_set(&osb->alloc_stats.moves, 0);
51749- atomic_set(&osb->alloc_stats.local_data, 0);
51750- atomic_set(&osb->alloc_stats.bitmap_data, 0);
51751- atomic_set(&osb->alloc_stats.bg_allocs, 0);
51752- atomic_set(&osb->alloc_stats.bg_extends, 0);
51753+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
51754+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
51755+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
51756+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
51757+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
51758
51759 /* Copy the blockcheck stats from the superblock probe */
51760 osb->osb_ecc_stats = *stats;
51761diff --git a/fs/open.c b/fs/open.c
51762index 4f01e06..091f6c3 100644
51763--- a/fs/open.c
51764+++ b/fs/open.c
51765@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
51766 error = locks_verify_truncate(inode, NULL, length);
51767 if (!error)
51768 error = security_path_truncate(&path, length, 0);
51769+
51770+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
51771+ error = -EACCES;
51772+
51773 if (!error) {
51774 vfs_dq_init(inode);
51775 error = do_truncate(path.dentry, length, 0, NULL);
51776@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
51777 if (__mnt_is_readonly(path.mnt))
51778 res = -EROFS;
51779
51780+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
51781+ res = -EACCES;
51782+
51783 out_path_release:
51784 path_put(&path);
51785 out:
51786@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
51787 if (error)
51788 goto dput_and_out;
51789
51790+ gr_log_chdir(path.dentry, path.mnt);
51791+
51792 set_fs_pwd(current->fs, &path);
51793
51794 dput_and_out:
51795@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
51796 goto out_putf;
51797
51798 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
51799+
51800+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
51801+ error = -EPERM;
51802+
51803+ if (!error)
51804+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
51805+
51806 if (!error)
51807 set_fs_pwd(current->fs, &file->f_path);
51808 out_putf:
51809@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
51810 if (!capable(CAP_SYS_CHROOT))
51811 goto dput_and_out;
51812
51813+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
51814+ goto dput_and_out;
51815+
51816 set_fs_root(current->fs, &path);
51817+
51818+ gr_handle_chroot_chdir(&path);
51819+
51820 error = 0;
51821 dput_and_out:
51822 path_put(&path);
51823@@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
51824 err = mnt_want_write_file(file);
51825 if (err)
51826 goto out_putf;
51827+
51828 mutex_lock(&inode->i_mutex);
51829+
51830+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
51831+ err = -EACCES;
51832+ goto out_unlock;
51833+ }
51834+
51835 if (mode == (mode_t) -1)
51836 mode = inode->i_mode;
51837+
51838+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
51839+ err = -EPERM;
51840+ goto out_unlock;
51841+ }
51842+
51843 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
51844 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
51845 err = notify_change(dentry, &newattrs);
51846+
51847+out_unlock:
51848 mutex_unlock(&inode->i_mutex);
51849 mnt_drop_write(file->f_path.mnt);
51850 out_putf:
51851@@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
51852 error = mnt_want_write(path.mnt);
51853 if (error)
51854 goto dput_and_out;
51855+
51856 mutex_lock(&inode->i_mutex);
51857+
51858+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
51859+ error = -EACCES;
51860+ goto out_unlock;
51861+ }
51862+
51863 if (mode == (mode_t) -1)
51864 mode = inode->i_mode;
51865+
51866+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
51867+ error = -EACCES;
51868+ goto out_unlock;
51869+ }
51870+
51871 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
51872 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
51873 error = notify_change(path.dentry, &newattrs);
51874+
51875+out_unlock:
51876 mutex_unlock(&inode->i_mutex);
51877 mnt_drop_write(path.mnt);
51878 dput_and_out:
51879@@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
51880 return sys_fchmodat(AT_FDCWD, filename, mode);
51881 }
51882
51883-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
51884+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
51885 {
51886 struct inode *inode = dentry->d_inode;
51887 int error;
51888 struct iattr newattrs;
51889
51890+ if (!gr_acl_handle_chown(dentry, mnt))
51891+ return -EACCES;
51892+
51893 newattrs.ia_valid = ATTR_CTIME;
51894 if (user != (uid_t) -1) {
51895 newattrs.ia_valid |= ATTR_UID;
51896@@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
51897 error = mnt_want_write(path.mnt);
51898 if (error)
51899 goto out_release;
51900- error = chown_common(path.dentry, user, group);
51901+ error = chown_common(path.dentry, user, group, path.mnt);
51902 mnt_drop_write(path.mnt);
51903 out_release:
51904 path_put(&path);
51905@@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
51906 error = mnt_want_write(path.mnt);
51907 if (error)
51908 goto out_release;
51909- error = chown_common(path.dentry, user, group);
51910+ error = chown_common(path.dentry, user, group, path.mnt);
51911 mnt_drop_write(path.mnt);
51912 out_release:
51913 path_put(&path);
51914@@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
51915 error = mnt_want_write(path.mnt);
51916 if (error)
51917 goto out_release;
51918- error = chown_common(path.dentry, user, group);
51919+ error = chown_common(path.dentry, user, group, path.mnt);
51920 mnt_drop_write(path.mnt);
51921 out_release:
51922 path_put(&path);
51923@@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
51924 goto out_fput;
51925 dentry = file->f_path.dentry;
51926 audit_inode(NULL, dentry);
51927- error = chown_common(dentry, user, group);
51928+ error = chown_common(dentry, user, group, file->f_path.mnt);
51929 mnt_drop_write(file->f_path.mnt);
51930 out_fput:
51931 fput(file);
51932@@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
51933 if (!IS_ERR(tmp)) {
51934 fd = get_unused_fd_flags(flags);
51935 if (fd >= 0) {
51936- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
51937+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
51938 if (IS_ERR(f)) {
51939 put_unused_fd(fd);
51940 fd = PTR_ERR(f);
51941diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
51942index dd6efdb..3babc6c 100644
51943--- a/fs/partitions/ldm.c
51944+++ b/fs/partitions/ldm.c
51945@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
51946 ldm_error ("A VBLK claims to have %d parts.", num);
51947 return false;
51948 }
51949+
51950 if (rec >= num) {
51951 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
51952 return false;
51953@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
51954 goto found;
51955 }
51956
51957- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
51958+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
51959 if (!f) {
51960 ldm_crit ("Out of memory.");
51961 return false;
51962diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
51963index 5765198..7f8e9e0 100644
51964--- a/fs/partitions/mac.c
51965+++ b/fs/partitions/mac.c
51966@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
51967 return 0; /* not a MacOS disk */
51968 }
51969 blocks_in_map = be32_to_cpu(part->map_count);
51970+ printk(" [mac]");
51971 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
51972 put_dev_sector(sect);
51973 return 0;
51974 }
51975- printk(" [mac]");
51976 for (slot = 1; slot <= blocks_in_map; ++slot) {
51977 int pos = slot * secsize;
51978 put_dev_sector(sect);
51979diff --git a/fs/pipe.c b/fs/pipe.c
51980index d0cc080..8a6f211 100644
51981--- a/fs/pipe.c
51982+++ b/fs/pipe.c
51983@@ -401,9 +401,9 @@ redo:
51984 }
51985 if (bufs) /* More to do? */
51986 continue;
51987- if (!pipe->writers)
51988+ if (!atomic_read(&pipe->writers))
51989 break;
51990- if (!pipe->waiting_writers) {
51991+ if (!atomic_read(&pipe->waiting_writers)) {
51992 /* syscall merging: Usually we must not sleep
51993 * if O_NONBLOCK is set, or if we got some data.
51994 * But if a writer sleeps in kernel space, then
51995@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
51996 mutex_lock(&inode->i_mutex);
51997 pipe = inode->i_pipe;
51998
51999- if (!pipe->readers) {
52000+ if (!atomic_read(&pipe->readers)) {
52001 send_sig(SIGPIPE, current, 0);
52002 ret = -EPIPE;
52003 goto out;
52004@@ -511,7 +511,7 @@ redo1:
52005 for (;;) {
52006 int bufs;
52007
52008- if (!pipe->readers) {
52009+ if (!atomic_read(&pipe->readers)) {
52010 send_sig(SIGPIPE, current, 0);
52011 if (!ret)
52012 ret = -EPIPE;
52013@@ -597,9 +597,9 @@ redo2:
52014 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52015 do_wakeup = 0;
52016 }
52017- pipe->waiting_writers++;
52018+ atomic_inc(&pipe->waiting_writers);
52019 pipe_wait(pipe);
52020- pipe->waiting_writers--;
52021+ atomic_dec(&pipe->waiting_writers);
52022 }
52023 out:
52024 mutex_unlock(&inode->i_mutex);
52025@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52026 mask = 0;
52027 if (filp->f_mode & FMODE_READ) {
52028 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52029- if (!pipe->writers && filp->f_version != pipe->w_counter)
52030+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52031 mask |= POLLHUP;
52032 }
52033
52034@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52035 * Most Unices do not set POLLERR for FIFOs but on Linux they
52036 * behave exactly like pipes for poll().
52037 */
52038- if (!pipe->readers)
52039+ if (!atomic_read(&pipe->readers))
52040 mask |= POLLERR;
52041 }
52042
52043@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52044
52045 mutex_lock(&inode->i_mutex);
52046 pipe = inode->i_pipe;
52047- pipe->readers -= decr;
52048- pipe->writers -= decw;
52049+ atomic_sub(decr, &pipe->readers);
52050+ atomic_sub(decw, &pipe->writers);
52051
52052- if (!pipe->readers && !pipe->writers) {
52053+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52054 free_pipe_info(inode);
52055 } else {
52056 wake_up_interruptible_sync(&pipe->wait);
52057@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52058
52059 if (inode->i_pipe) {
52060 ret = 0;
52061- inode->i_pipe->readers++;
52062+ atomic_inc(&inode->i_pipe->readers);
52063 }
52064
52065 mutex_unlock(&inode->i_mutex);
52066@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52067
52068 if (inode->i_pipe) {
52069 ret = 0;
52070- inode->i_pipe->writers++;
52071+ atomic_inc(&inode->i_pipe->writers);
52072 }
52073
52074 mutex_unlock(&inode->i_mutex);
52075@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52076 if (inode->i_pipe) {
52077 ret = 0;
52078 if (filp->f_mode & FMODE_READ)
52079- inode->i_pipe->readers++;
52080+ atomic_inc(&inode->i_pipe->readers);
52081 if (filp->f_mode & FMODE_WRITE)
52082- inode->i_pipe->writers++;
52083+ atomic_inc(&inode->i_pipe->writers);
52084 }
52085
52086 mutex_unlock(&inode->i_mutex);
52087@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52088 inode->i_pipe = NULL;
52089 }
52090
52091-static struct vfsmount *pipe_mnt __read_mostly;
52092+struct vfsmount *pipe_mnt __read_mostly;
52093 static int pipefs_delete_dentry(struct dentry *dentry)
52094 {
52095 /*
52096@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52097 goto fail_iput;
52098 inode->i_pipe = pipe;
52099
52100- pipe->readers = pipe->writers = 1;
52101+ atomic_set(&pipe->readers, 1);
52102+ atomic_set(&pipe->writers, 1);
52103 inode->i_fop = &rdwr_pipefifo_fops;
52104
52105 /*
52106diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52107index 50f8f06..c5755df 100644
52108--- a/fs/proc/Kconfig
52109+++ b/fs/proc/Kconfig
52110@@ -30,12 +30,12 @@ config PROC_FS
52111
52112 config PROC_KCORE
52113 bool "/proc/kcore support" if !ARM
52114- depends on PROC_FS && MMU
52115+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52116
52117 config PROC_VMCORE
52118 bool "/proc/vmcore support (EXPERIMENTAL)"
52119- depends on PROC_FS && CRASH_DUMP
52120- default y
52121+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52122+ default n
52123 help
52124 Exports the dump image of crashed kernel in ELF format.
52125
52126@@ -59,8 +59,8 @@ config PROC_SYSCTL
52127 limited in memory.
52128
52129 config PROC_PAGE_MONITOR
52130- default y
52131- depends on PROC_FS && MMU
52132+ default n
52133+ depends on PROC_FS && MMU && !GRKERNSEC
52134 bool "Enable /proc page monitoring" if EMBEDDED
52135 help
52136 Various /proc files exist to monitor process memory utilization:
52137diff --git a/fs/proc/array.c b/fs/proc/array.c
52138index c5ef152..1363194 100644
52139--- a/fs/proc/array.c
52140+++ b/fs/proc/array.c
52141@@ -60,6 +60,7 @@
52142 #include <linux/tty.h>
52143 #include <linux/string.h>
52144 #include <linux/mman.h>
52145+#include <linux/grsecurity.h>
52146 #include <linux/proc_fs.h>
52147 #include <linux/ioport.h>
52148 #include <linux/uaccess.h>
52149@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52150 p->nivcsw);
52151 }
52152
52153+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52154+static inline void task_pax(struct seq_file *m, struct task_struct *p)
52155+{
52156+ if (p->mm)
52157+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52158+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52159+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52160+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52161+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52162+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52163+ else
52164+ seq_printf(m, "PaX:\t-----\n");
52165+}
52166+#endif
52167+
52168 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52169 struct pid *pid, struct task_struct *task)
52170 {
52171@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52172 task_cap(m, task);
52173 cpuset_task_status_allowed(m, task);
52174 task_context_switch_counts(m, task);
52175+
52176+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52177+ task_pax(m, task);
52178+#endif
52179+
52180+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52181+ task_grsec_rbac(m, task);
52182+#endif
52183+
52184 return 0;
52185 }
52186
52187+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52188+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52189+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52190+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52191+#endif
52192+
52193 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52194 struct pid *pid, struct task_struct *task, int whole)
52195 {
52196@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52197 cputime_t cutime, cstime, utime, stime;
52198 cputime_t cgtime, gtime;
52199 unsigned long rsslim = 0;
52200- char tcomm[sizeof(task->comm)];
52201+ char tcomm[sizeof(task->comm)] = { 0 };
52202 unsigned long flags;
52203
52204+ pax_track_stack();
52205+
52206 state = *get_task_state(task);
52207 vsize = eip = esp = 0;
52208 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52209@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52210 gtime = task_gtime(task);
52211 }
52212
52213+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52214+ if (PAX_RAND_FLAGS(mm)) {
52215+ eip = 0;
52216+ esp = 0;
52217+ wchan = 0;
52218+ }
52219+#endif
52220+#ifdef CONFIG_GRKERNSEC_HIDESYM
52221+ wchan = 0;
52222+ eip =0;
52223+ esp =0;
52224+#endif
52225+
52226 /* scale priority and nice values from timeslices to -20..20 */
52227 /* to make it look like a "normal" Unix priority/nice value */
52228 priority = task_prio(task);
52229@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52230 vsize,
52231 mm ? get_mm_rss(mm) : 0,
52232 rsslim,
52233+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52234+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52235+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52236+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52237+#else
52238 mm ? (permitted ? mm->start_code : 1) : 0,
52239 mm ? (permitted ? mm->end_code : 1) : 0,
52240 (permitted && mm) ? mm->start_stack : 0,
52241+#endif
52242 esp,
52243 eip,
52244 /* The signal information here is obsolete.
52245@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52246
52247 return 0;
52248 }
52249+
52250+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52251+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52252+{
52253+ u32 curr_ip = 0;
52254+ unsigned long flags;
52255+
52256+ if (lock_task_sighand(task, &flags)) {
52257+ curr_ip = task->signal->curr_ip;
52258+ unlock_task_sighand(task, &flags);
52259+ }
52260+
52261+ return sprintf(buffer, "%pI4\n", &curr_ip);
52262+}
52263+#endif
52264diff --git a/fs/proc/base.c b/fs/proc/base.c
52265index 67f7dc0..e95ea4f 100644
52266--- a/fs/proc/base.c
52267+++ b/fs/proc/base.c
52268@@ -102,6 +102,22 @@ struct pid_entry {
52269 union proc_op op;
52270 };
52271
52272+struct getdents_callback {
52273+ struct linux_dirent __user * current_dir;
52274+ struct linux_dirent __user * previous;
52275+ struct file * file;
52276+ int count;
52277+ int error;
52278+};
52279+
52280+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52281+ loff_t offset, u64 ino, unsigned int d_type)
52282+{
52283+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
52284+ buf->error = -EINVAL;
52285+ return 0;
52286+}
52287+
52288 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52289 .name = (NAME), \
52290 .len = sizeof(NAME) - 1, \
52291@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52292 if (task == current)
52293 return 0;
52294
52295+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52296+ return -EPERM;
52297+
52298 /*
52299 * If current is actively ptrace'ing, and would also be
52300 * permitted to freshly attach with ptrace now, permit it.
52301@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52302 if (!mm->arg_end)
52303 goto out_mm; /* Shh! No looking before we're done */
52304
52305+ if (gr_acl_handle_procpidmem(task))
52306+ goto out_mm;
52307+
52308 len = mm->arg_end - mm->arg_start;
52309
52310 if (len > PAGE_SIZE)
52311@@ -287,12 +309,28 @@ out:
52312 return res;
52313 }
52314
52315+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52316+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52317+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52318+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52319+#endif
52320+
52321 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52322 {
52323 int res = 0;
52324 struct mm_struct *mm = get_task_mm(task);
52325 if (mm) {
52326 unsigned int nwords = 0;
52327+
52328+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52329+ /* allow if we're currently ptracing this task */
52330+ if (PAX_RAND_FLAGS(mm) &&
52331+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52332+ mmput(mm);
52333+ return 0;
52334+ }
52335+#endif
52336+
52337 do {
52338 nwords += 2;
52339 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52340@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52341 }
52342
52343
52344-#ifdef CONFIG_KALLSYMS
52345+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52346 /*
52347 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52348 * Returns the resolved symbol. If that fails, simply return the address.
52349@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52350 mutex_unlock(&task->cred_guard_mutex);
52351 }
52352
52353-#ifdef CONFIG_STACKTRACE
52354+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52355
52356 #define MAX_STACK_TRACE_DEPTH 64
52357
52358@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52359 return count;
52360 }
52361
52362-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52363+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52364 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52365 {
52366 long nr;
52367@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52368 /************************************************************************/
52369
52370 /* permission checks */
52371-static int proc_fd_access_allowed(struct inode *inode)
52372+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52373 {
52374 struct task_struct *task;
52375 int allowed = 0;
52376@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52377 */
52378 task = get_proc_task(inode);
52379 if (task) {
52380- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52381+ if (log)
52382+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52383+ else
52384+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52385 put_task_struct(task);
52386 }
52387 return allowed;
52388@@ -963,6 +1004,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
52389 if (!task)
52390 goto out_no_task;
52391
52392+ if (gr_acl_handle_procpidmem(task))
52393+ goto out;
52394+
52395 if (!ptrace_may_access(task, PTRACE_MODE_READ))
52396 goto out;
52397
52398@@ -1377,7 +1421,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
52399 path_put(&nd->path);
52400
52401 /* Are we allowed to snoop on the tasks file descriptors? */
52402- if (!proc_fd_access_allowed(inode))
52403+ if (!proc_fd_access_allowed(inode,0))
52404 goto out;
52405
52406 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
52407@@ -1417,8 +1461,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
52408 struct path path;
52409
52410 /* Are we allowed to snoop on the tasks file descriptors? */
52411- if (!proc_fd_access_allowed(inode))
52412- goto out;
52413+ /* logging this is needed for learning on chromium to work properly,
52414+ but we don't want to flood the logs from 'ps' which does a readlink
52415+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
52416+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
52417+ */
52418+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
52419+ if (!proc_fd_access_allowed(inode,0))
52420+ goto out;
52421+ } else {
52422+ if (!proc_fd_access_allowed(inode,1))
52423+ goto out;
52424+ }
52425
52426 error = PROC_I(inode)->op.proc_get_link(inode, &path);
52427 if (error)
52428@@ -1483,7 +1537,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
52429 rcu_read_lock();
52430 cred = __task_cred(task);
52431 inode->i_uid = cred->euid;
52432+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52433+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52434+#else
52435 inode->i_gid = cred->egid;
52436+#endif
52437 rcu_read_unlock();
52438 }
52439 security_task_to_inode(task, inode);
52440@@ -1501,6 +1559,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52441 struct inode *inode = dentry->d_inode;
52442 struct task_struct *task;
52443 const struct cred *cred;
52444+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52445+ const struct cred *tmpcred = current_cred();
52446+#endif
52447
52448 generic_fillattr(inode, stat);
52449
52450@@ -1508,13 +1569,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52451 stat->uid = 0;
52452 stat->gid = 0;
52453 task = pid_task(proc_pid(inode), PIDTYPE_PID);
52454+
52455+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
52456+ rcu_read_unlock();
52457+ return -ENOENT;
52458+ }
52459+
52460 if (task) {
52461+ cred = __task_cred(task);
52462+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52463+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
52464+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52465+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52466+#endif
52467+ ) {
52468+#endif
52469 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52470+#ifdef CONFIG_GRKERNSEC_PROC_USER
52471+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52472+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52473+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52474+#endif
52475 task_dumpable(task)) {
52476- cred = __task_cred(task);
52477 stat->uid = cred->euid;
52478+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52479+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
52480+#else
52481 stat->gid = cred->egid;
52482+#endif
52483 }
52484+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52485+ } else {
52486+ rcu_read_unlock();
52487+ return -ENOENT;
52488+ }
52489+#endif
52490 }
52491 rcu_read_unlock();
52492 return 0;
52493@@ -1545,11 +1634,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
52494
52495 if (task) {
52496 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52497+#ifdef CONFIG_GRKERNSEC_PROC_USER
52498+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52499+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52500+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52501+#endif
52502 task_dumpable(task)) {
52503 rcu_read_lock();
52504 cred = __task_cred(task);
52505 inode->i_uid = cred->euid;
52506+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52507+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52508+#else
52509 inode->i_gid = cred->egid;
52510+#endif
52511 rcu_read_unlock();
52512 } else {
52513 inode->i_uid = 0;
52514@@ -1670,7 +1768,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
52515 int fd = proc_fd(inode);
52516
52517 if (task) {
52518- files = get_files_struct(task);
52519+ if (!gr_acl_handle_procpidmem(task))
52520+ files = get_files_struct(task);
52521 put_task_struct(task);
52522 }
52523 if (files) {
52524@@ -1922,12 +2021,22 @@ static const struct file_operations proc_fd_operations = {
52525 static int proc_fd_permission(struct inode *inode, int mask)
52526 {
52527 int rv;
52528+ struct task_struct *task;
52529
52530 rv = generic_permission(inode, mask, NULL);
52531- if (rv == 0)
52532- return 0;
52533+
52534 if (task_pid(current) == proc_pid(inode))
52535 rv = 0;
52536+
52537+ task = get_proc_task(inode);
52538+ if (task == NULL)
52539+ return rv;
52540+
52541+ if (gr_acl_handle_procpidmem(task))
52542+ rv = -EACCES;
52543+
52544+ put_task_struct(task);
52545+
52546 return rv;
52547 }
52548
52549@@ -2036,6 +2145,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
52550 if (!task)
52551 goto out_no_task;
52552
52553+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52554+ goto out;
52555+
52556 /*
52557 * Yes, it does not scale. And it should not. Don't add
52558 * new entries into /proc/<tgid>/ without very good reasons.
52559@@ -2080,6 +2192,9 @@ static int proc_pident_readdir(struct file *filp,
52560 if (!task)
52561 goto out_no_task;
52562
52563+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52564+ goto out;
52565+
52566 ret = 0;
52567 i = filp->f_pos;
52568 switch (i) {
52569@@ -2347,7 +2462,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
52570 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
52571 void *cookie)
52572 {
52573- char *s = nd_get_link(nd);
52574+ const char *s = nd_get_link(nd);
52575 if (!IS_ERR(s))
52576 __putname(s);
52577 }
52578@@ -2553,7 +2668,7 @@ static const struct pid_entry tgid_base_stuff[] = {
52579 #ifdef CONFIG_SCHED_DEBUG
52580 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52581 #endif
52582-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52583+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52584 INF("syscall", S_IRUGO, proc_pid_syscall),
52585 #endif
52586 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52587@@ -2578,10 +2693,10 @@ static const struct pid_entry tgid_base_stuff[] = {
52588 #ifdef CONFIG_SECURITY
52589 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52590 #endif
52591-#ifdef CONFIG_KALLSYMS
52592+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52593 INF("wchan", S_IRUGO, proc_pid_wchan),
52594 #endif
52595-#ifdef CONFIG_STACKTRACE
52596+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52597 ONE("stack", S_IRUGO, proc_pid_stack),
52598 #endif
52599 #ifdef CONFIG_SCHEDSTATS
52600@@ -2611,6 +2726,9 @@ static const struct pid_entry tgid_base_stuff[] = {
52601 #ifdef CONFIG_TASK_IO_ACCOUNTING
52602 INF("io", S_IRUSR, proc_tgid_io_accounting),
52603 #endif
52604+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52605+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
52606+#endif
52607 };
52608
52609 static int proc_tgid_base_readdir(struct file * filp,
52610@@ -2735,7 +2853,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
52611 if (!inode)
52612 goto out;
52613
52614+#ifdef CONFIG_GRKERNSEC_PROC_USER
52615+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
52616+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52617+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52618+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
52619+#else
52620 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
52621+#endif
52622 inode->i_op = &proc_tgid_base_inode_operations;
52623 inode->i_fop = &proc_tgid_base_operations;
52624 inode->i_flags|=S_IMMUTABLE;
52625@@ -2777,7 +2902,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
52626 if (!task)
52627 goto out;
52628
52629+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52630+ goto out_put_task;
52631+
52632 result = proc_pid_instantiate(dir, dentry, task, NULL);
52633+out_put_task:
52634 put_task_struct(task);
52635 out:
52636 return result;
52637@@ -2842,6 +2971,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52638 {
52639 unsigned int nr;
52640 struct task_struct *reaper;
52641+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52642+ const struct cred *tmpcred = current_cred();
52643+ const struct cred *itercred;
52644+#endif
52645+ filldir_t __filldir = filldir;
52646 struct tgid_iter iter;
52647 struct pid_namespace *ns;
52648
52649@@ -2865,8 +2999,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52650 for (iter = next_tgid(ns, iter);
52651 iter.task;
52652 iter.tgid += 1, iter = next_tgid(ns, iter)) {
52653+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52654+ rcu_read_lock();
52655+ itercred = __task_cred(iter.task);
52656+#endif
52657+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
52658+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52659+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
52660+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52661+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52662+#endif
52663+ )
52664+#endif
52665+ )
52666+ __filldir = &gr_fake_filldir;
52667+ else
52668+ __filldir = filldir;
52669+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52670+ rcu_read_unlock();
52671+#endif
52672 filp->f_pos = iter.tgid + TGID_OFFSET;
52673- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
52674+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
52675 put_task_struct(iter.task);
52676 goto out;
52677 }
52678@@ -2892,7 +3045,7 @@ static const struct pid_entry tid_base_stuff[] = {
52679 #ifdef CONFIG_SCHED_DEBUG
52680 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52681 #endif
52682-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52683+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52684 INF("syscall", S_IRUGO, proc_pid_syscall),
52685 #endif
52686 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52687@@ -2916,10 +3069,10 @@ static const struct pid_entry tid_base_stuff[] = {
52688 #ifdef CONFIG_SECURITY
52689 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52690 #endif
52691-#ifdef CONFIG_KALLSYMS
52692+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52693 INF("wchan", S_IRUGO, proc_pid_wchan),
52694 #endif
52695-#ifdef CONFIG_STACKTRACE
52696+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52697 ONE("stack", S_IRUGO, proc_pid_stack),
52698 #endif
52699 #ifdef CONFIG_SCHEDSTATS
52700diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
52701index 82676e3..5f8518a 100644
52702--- a/fs/proc/cmdline.c
52703+++ b/fs/proc/cmdline.c
52704@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
52705
52706 static int __init proc_cmdline_init(void)
52707 {
52708+#ifdef CONFIG_GRKERNSEC_PROC_ADD
52709+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
52710+#else
52711 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
52712+#endif
52713 return 0;
52714 }
52715 module_init(proc_cmdline_init);
52716diff --git a/fs/proc/devices.c b/fs/proc/devices.c
52717index 59ee7da..469b4b6 100644
52718--- a/fs/proc/devices.c
52719+++ b/fs/proc/devices.c
52720@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
52721
52722 static int __init proc_devices_init(void)
52723 {
52724+#ifdef CONFIG_GRKERNSEC_PROC_ADD
52725+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
52726+#else
52727 proc_create("devices", 0, NULL, &proc_devinfo_operations);
52728+#endif
52729 return 0;
52730 }
52731 module_init(proc_devices_init);
52732diff --git a/fs/proc/inode.c b/fs/proc/inode.c
52733index d78ade3..81767f9 100644
52734--- a/fs/proc/inode.c
52735+++ b/fs/proc/inode.c
52736@@ -18,12 +18,19 @@
52737 #include <linux/module.h>
52738 #include <linux/smp_lock.h>
52739 #include <linux/sysctl.h>
52740+#include <linux/grsecurity.h>
52741
52742 #include <asm/system.h>
52743 #include <asm/uaccess.h>
52744
52745 #include "internal.h"
52746
52747+#ifdef CONFIG_PROC_SYSCTL
52748+extern const struct inode_operations proc_sys_inode_operations;
52749+extern const struct inode_operations proc_sys_dir_operations;
52750+#endif
52751+
52752+
52753 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
52754 {
52755 atomic_inc(&de->count);
52756@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
52757 de_put(de);
52758 if (PROC_I(inode)->sysctl)
52759 sysctl_head_put(PROC_I(inode)->sysctl);
52760+
52761+#ifdef CONFIG_PROC_SYSCTL
52762+ if (inode->i_op == &proc_sys_inode_operations ||
52763+ inode->i_op == &proc_sys_dir_operations)
52764+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
52765+#endif
52766+
52767 clear_inode(inode);
52768 }
52769
52770@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
52771 if (de->mode) {
52772 inode->i_mode = de->mode;
52773 inode->i_uid = de->uid;
52774+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52775+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52776+#else
52777 inode->i_gid = de->gid;
52778+#endif
52779 }
52780 if (de->size)
52781 inode->i_size = de->size;
52782diff --git a/fs/proc/internal.h b/fs/proc/internal.h
52783index 753ca37..26bcf3b 100644
52784--- a/fs/proc/internal.h
52785+++ b/fs/proc/internal.h
52786@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52787 struct pid *pid, struct task_struct *task);
52788 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52789 struct pid *pid, struct task_struct *task);
52790+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52791+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
52792+#endif
52793 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
52794
52795 extern const struct file_operations proc_maps_operations;
52796diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
52797index b442dac..aab29cb 100644
52798--- a/fs/proc/kcore.c
52799+++ b/fs/proc/kcore.c
52800@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
52801 off_t offset = 0;
52802 struct kcore_list *m;
52803
52804+ pax_track_stack();
52805+
52806 /* setup ELF header */
52807 elf = (struct elfhdr *) bufp;
52808 bufp += sizeof(struct elfhdr);
52809@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52810 * the addresses in the elf_phdr on our list.
52811 */
52812 start = kc_offset_to_vaddr(*fpos - elf_buflen);
52813- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
52814+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
52815+ if (tsz > buflen)
52816 tsz = buflen;
52817-
52818+
52819 while (buflen) {
52820 struct kcore_list *m;
52821
52822@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52823 kfree(elf_buf);
52824 } else {
52825 if (kern_addr_valid(start)) {
52826- unsigned long n;
52827-
52828- n = copy_to_user(buffer, (char *)start, tsz);
52829- /*
52830- * We cannot distingush between fault on source
52831- * and fault on destination. When this happens
52832- * we clear too and hope it will trigger the
52833- * EFAULT again.
52834- */
52835- if (n) {
52836- if (clear_user(buffer + tsz - n,
52837- n))
52838+ char *elf_buf;
52839+ mm_segment_t oldfs;
52840+
52841+ elf_buf = kmalloc(tsz, GFP_KERNEL);
52842+ if (!elf_buf)
52843+ return -ENOMEM;
52844+ oldfs = get_fs();
52845+ set_fs(KERNEL_DS);
52846+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
52847+ set_fs(oldfs);
52848+ if (copy_to_user(buffer, elf_buf, tsz)) {
52849+ kfree(elf_buf);
52850 return -EFAULT;
52851+ }
52852 }
52853+ set_fs(oldfs);
52854+ kfree(elf_buf);
52855 } else {
52856 if (clear_user(buffer, tsz))
52857 return -EFAULT;
52858@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52859
52860 static int open_kcore(struct inode *inode, struct file *filp)
52861 {
52862+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
52863+ return -EPERM;
52864+#endif
52865 if (!capable(CAP_SYS_RAWIO))
52866 return -EPERM;
52867 if (kcore_need_update)
52868diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
52869index a65239c..ad1182a 100644
52870--- a/fs/proc/meminfo.c
52871+++ b/fs/proc/meminfo.c
52872@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
52873 unsigned long pages[NR_LRU_LISTS];
52874 int lru;
52875
52876+ pax_track_stack();
52877+
52878 /*
52879 * display in kilobytes.
52880 */
52881@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
52882 vmi.used >> 10,
52883 vmi.largest_chunk >> 10
52884 #ifdef CONFIG_MEMORY_FAILURE
52885- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
52886+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
52887 #endif
52888 );
52889
52890diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
52891index 9fe7d7e..cdb62c9 100644
52892--- a/fs/proc/nommu.c
52893+++ b/fs/proc/nommu.c
52894@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
52895 if (len < 1)
52896 len = 1;
52897 seq_printf(m, "%*c", len, ' ');
52898- seq_path(m, &file->f_path, "");
52899+ seq_path(m, &file->f_path, "\n\\");
52900 }
52901
52902 seq_putc(m, '\n');
52903diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
52904index 04d1270..25e1173 100644
52905--- a/fs/proc/proc_net.c
52906+++ b/fs/proc/proc_net.c
52907@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
52908 struct task_struct *task;
52909 struct nsproxy *ns;
52910 struct net *net = NULL;
52911+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52912+ const struct cred *cred = current_cred();
52913+#endif
52914+
52915+#ifdef CONFIG_GRKERNSEC_PROC_USER
52916+ if (cred->fsuid)
52917+ return net;
52918+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52919+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
52920+ return net;
52921+#endif
52922
52923 rcu_read_lock();
52924 task = pid_task(proc_pid(dir), PIDTYPE_PID);
52925diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
52926index f667e8a..55f4d96 100644
52927--- a/fs/proc/proc_sysctl.c
52928+++ b/fs/proc/proc_sysctl.c
52929@@ -7,11 +7,13 @@
52930 #include <linux/security.h>
52931 #include "internal.h"
52932
52933+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
52934+
52935 static const struct dentry_operations proc_sys_dentry_operations;
52936 static const struct file_operations proc_sys_file_operations;
52937-static const struct inode_operations proc_sys_inode_operations;
52938+const struct inode_operations proc_sys_inode_operations;
52939 static const struct file_operations proc_sys_dir_file_operations;
52940-static const struct inode_operations proc_sys_dir_operations;
52941+const struct inode_operations proc_sys_dir_operations;
52942
52943 static struct inode *proc_sys_make_inode(struct super_block *sb,
52944 struct ctl_table_header *head, struct ctl_table *table)
52945@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
52946 if (!p)
52947 goto out;
52948
52949+ if (gr_handle_sysctl(p, MAY_EXEC))
52950+ goto out;
52951+
52952 err = ERR_PTR(-ENOMEM);
52953 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
52954 if (h)
52955@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
52956
52957 err = NULL;
52958 dentry->d_op = &proc_sys_dentry_operations;
52959+
52960+ gr_handle_proc_create(dentry, inode);
52961+
52962 d_add(dentry, inode);
52963
52964 out:
52965@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
52966 return -ENOMEM;
52967 } else {
52968 child->d_op = &proc_sys_dentry_operations;
52969+
52970+ gr_handle_proc_create(child, inode);
52971+
52972 d_add(child, inode);
52973 }
52974 } else {
52975@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
52976 if (*pos < file->f_pos)
52977 continue;
52978
52979+ if (gr_handle_sysctl(table, 0))
52980+ continue;
52981+
52982 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
52983 if (res)
52984 return res;
52985@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
52986 if (IS_ERR(head))
52987 return PTR_ERR(head);
52988
52989+ if (table && gr_handle_sysctl(table, MAY_EXEC))
52990+ return -ENOENT;
52991+
52992 generic_fillattr(inode, stat);
52993 if (table)
52994 stat->mode = (stat->mode & S_IFMT) | table->mode;
52995@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
52996 };
52997
52998 static const struct file_operations proc_sys_dir_file_operations = {
52999+ .read = generic_read_dir,
53000 .readdir = proc_sys_readdir,
53001 .llseek = generic_file_llseek,
53002 };
53003
53004-static const struct inode_operations proc_sys_inode_operations = {
53005+const struct inode_operations proc_sys_inode_operations = {
53006 .permission = proc_sys_permission,
53007 .setattr = proc_sys_setattr,
53008 .getattr = proc_sys_getattr,
53009 };
53010
53011-static const struct inode_operations proc_sys_dir_operations = {
53012+const struct inode_operations proc_sys_dir_operations = {
53013 .lookup = proc_sys_lookup,
53014 .permission = proc_sys_permission,
53015 .setattr = proc_sys_setattr,
53016diff --git a/fs/proc/root.c b/fs/proc/root.c
53017index b080b79..d957e63 100644
53018--- a/fs/proc/root.c
53019+++ b/fs/proc/root.c
53020@@ -134,7 +134,15 @@ void __init proc_root_init(void)
53021 #ifdef CONFIG_PROC_DEVICETREE
53022 proc_device_tree_init();
53023 #endif
53024+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53025+#ifdef CONFIG_GRKERNSEC_PROC_USER
53026+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53027+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53028+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53029+#endif
53030+#else
53031 proc_mkdir("bus", NULL);
53032+#endif
53033 proc_sys_init();
53034 }
53035
53036diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53037index 3b7b82a..7dbb571 100644
53038--- a/fs/proc/task_mmu.c
53039+++ b/fs/proc/task_mmu.c
53040@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53041 "VmStk:\t%8lu kB\n"
53042 "VmExe:\t%8lu kB\n"
53043 "VmLib:\t%8lu kB\n"
53044- "VmPTE:\t%8lu kB\n",
53045- hiwater_vm << (PAGE_SHIFT-10),
53046+ "VmPTE:\t%8lu kB\n"
53047+
53048+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53049+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53050+#endif
53051+
53052+ ,hiwater_vm << (PAGE_SHIFT-10),
53053 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53054 mm->locked_vm << (PAGE_SHIFT-10),
53055 hiwater_rss << (PAGE_SHIFT-10),
53056 total_rss << (PAGE_SHIFT-10),
53057 data << (PAGE_SHIFT-10),
53058 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53059- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53060+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53061+
53062+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53063+ , mm->context.user_cs_base, mm->context.user_cs_limit
53064+#endif
53065+
53066+ );
53067 }
53068
53069 unsigned long task_vsize(struct mm_struct *mm)
53070@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, void *v)
53071 struct proc_maps_private *priv = m->private;
53072 struct vm_area_struct *vma = v;
53073
53074- vma_stop(priv, vma);
53075+ if (!IS_ERR(vma))
53076+ vma_stop(priv, vma);
53077 if (priv->task)
53078 put_task_struct(priv->task);
53079 }
53080@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53081 return ret;
53082 }
53083
53084+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53085+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53086+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53087+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53088+#endif
53089+
53090 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53091 {
53092 struct mm_struct *mm = vma->vm_mm;
53093@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53094 int flags = vma->vm_flags;
53095 unsigned long ino = 0;
53096 unsigned long long pgoff = 0;
53097- unsigned long start;
53098 dev_t dev = 0;
53099 int len;
53100
53101@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53102 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53103 }
53104
53105- /* We don't show the stack guard page in /proc/maps */
53106- start = vma->vm_start;
53107- if (vma->vm_flags & VM_GROWSDOWN)
53108- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53109- start += PAGE_SIZE;
53110-
53111 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53112- start,
53113+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53114+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53115+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53116+#else
53117+ vma->vm_start,
53118 vma->vm_end,
53119+#endif
53120 flags & VM_READ ? 'r' : '-',
53121 flags & VM_WRITE ? 'w' : '-',
53122 flags & VM_EXEC ? 'x' : '-',
53123 flags & VM_MAYSHARE ? 's' : 'p',
53124+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53125+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53126+#else
53127 pgoff,
53128+#endif
53129 MAJOR(dev), MINOR(dev), ino, &len);
53130
53131 /*
53132@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53133 */
53134 if (file) {
53135 pad_len_spaces(m, len);
53136- seq_path(m, &file->f_path, "\n");
53137+ seq_path(m, &file->f_path, "\n\\");
53138 } else {
53139 const char *name = arch_vma_name(vma);
53140 if (!name) {
53141@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53142 if (vma->vm_start <= mm->brk &&
53143 vma->vm_end >= mm->start_brk) {
53144 name = "[heap]";
53145- } else if (vma->vm_start <= mm->start_stack &&
53146- vma->vm_end >= mm->start_stack) {
53147+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53148+ (vma->vm_start <= mm->start_stack &&
53149+ vma->vm_end >= mm->start_stack)) {
53150 name = "[stack]";
53151 }
53152 } else {
53153@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m, void *v)
53154 };
53155
53156 memset(&mss, 0, sizeof mss);
53157- mss.vma = vma;
53158- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53159- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53160+
53161+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53162+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53163+#endif
53164+ mss.vma = vma;
53165+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53166+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53167+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53168+ }
53169+#endif
53170
53171 show_map_vma(m, vma);
53172
53173@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m, void *v)
53174 "Swap: %8lu kB\n"
53175 "KernelPageSize: %8lu kB\n"
53176 "MMUPageSize: %8lu kB\n",
53177+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53178+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53179+#else
53180 (vma->vm_end - vma->vm_start) >> 10,
53181+#endif
53182 mss.resident >> 10,
53183 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53184 mss.shared_clean >> 10,
53185diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53186index 8f5c05d..c99c76d 100644
53187--- a/fs/proc/task_nommu.c
53188+++ b/fs/proc/task_nommu.c
53189@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53190 else
53191 bytes += kobjsize(mm);
53192
53193- if (current->fs && current->fs->users > 1)
53194+ if (current->fs && atomic_read(&current->fs->users) > 1)
53195 sbytes += kobjsize(current->fs);
53196 else
53197 bytes += kobjsize(current->fs);
53198@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53199 if (len < 1)
53200 len = 1;
53201 seq_printf(m, "%*c", len, ' ');
53202- seq_path(m, &file->f_path, "");
53203+ seq_path(m, &file->f_path, "\n\\");
53204 }
53205
53206 seq_putc(m, '\n');
53207diff --git a/fs/readdir.c b/fs/readdir.c
53208index 7723401..30059a6 100644
53209--- a/fs/readdir.c
53210+++ b/fs/readdir.c
53211@@ -16,6 +16,7 @@
53212 #include <linux/security.h>
53213 #include <linux/syscalls.h>
53214 #include <linux/unistd.h>
53215+#include <linux/namei.h>
53216
53217 #include <asm/uaccess.h>
53218
53219@@ -67,6 +68,7 @@ struct old_linux_dirent {
53220
53221 struct readdir_callback {
53222 struct old_linux_dirent __user * dirent;
53223+ struct file * file;
53224 int result;
53225 };
53226
53227@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53228 buf->result = -EOVERFLOW;
53229 return -EOVERFLOW;
53230 }
53231+
53232+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53233+ return 0;
53234+
53235 buf->result++;
53236 dirent = buf->dirent;
53237 if (!access_ok(VERIFY_WRITE, dirent,
53238@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53239
53240 buf.result = 0;
53241 buf.dirent = dirent;
53242+ buf.file = file;
53243
53244 error = vfs_readdir(file, fillonedir, &buf);
53245 if (buf.result)
53246@@ -142,6 +149,7 @@ struct linux_dirent {
53247 struct getdents_callback {
53248 struct linux_dirent __user * current_dir;
53249 struct linux_dirent __user * previous;
53250+ struct file * file;
53251 int count;
53252 int error;
53253 };
53254@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53255 buf->error = -EOVERFLOW;
53256 return -EOVERFLOW;
53257 }
53258+
53259+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53260+ return 0;
53261+
53262 dirent = buf->previous;
53263 if (dirent) {
53264 if (__put_user(offset, &dirent->d_off))
53265@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53266 buf.previous = NULL;
53267 buf.count = count;
53268 buf.error = 0;
53269+ buf.file = file;
53270
53271 error = vfs_readdir(file, filldir, &buf);
53272 if (error >= 0)
53273@@ -228,6 +241,7 @@ out:
53274 struct getdents_callback64 {
53275 struct linux_dirent64 __user * current_dir;
53276 struct linux_dirent64 __user * previous;
53277+ struct file *file;
53278 int count;
53279 int error;
53280 };
53281@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53282 buf->error = -EINVAL; /* only used if we fail.. */
53283 if (reclen > buf->count)
53284 return -EINVAL;
53285+
53286+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53287+ return 0;
53288+
53289 dirent = buf->previous;
53290 if (dirent) {
53291 if (__put_user(offset, &dirent->d_off))
53292@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53293
53294 buf.current_dir = dirent;
53295 buf.previous = NULL;
53296+ buf.file = file;
53297 buf.count = count;
53298 buf.error = 0;
53299
53300@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53301 error = buf.error;
53302 lastdirent = buf.previous;
53303 if (lastdirent) {
53304- typeof(lastdirent->d_off) d_off = file->f_pos;
53305+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
53306 if (__put_user(d_off, &lastdirent->d_off))
53307 error = -EFAULT;
53308 else
53309diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
53310index d42c30c..4fd8718 100644
53311--- a/fs/reiserfs/dir.c
53312+++ b/fs/reiserfs/dir.c
53313@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
53314 struct reiserfs_dir_entry de;
53315 int ret = 0;
53316
53317+ pax_track_stack();
53318+
53319 reiserfs_write_lock(inode->i_sb);
53320
53321 reiserfs_check_lock_depth(inode->i_sb, "readdir");
53322diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
53323index 128d3f7..8840d44 100644
53324--- a/fs/reiserfs/do_balan.c
53325+++ b/fs/reiserfs/do_balan.c
53326@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
53327 return;
53328 }
53329
53330- atomic_inc(&(fs_generation(tb->tb_sb)));
53331+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
53332 do_balance_starts(tb);
53333
53334 /* balance leaf returns 0 except if combining L R and S into
53335diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
53336index 72cb1cc..d0e3181 100644
53337--- a/fs/reiserfs/item_ops.c
53338+++ b/fs/reiserfs/item_ops.c
53339@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
53340 vi->vi_index, vi->vi_type, vi->vi_ih);
53341 }
53342
53343-static struct item_operations stat_data_ops = {
53344+static const struct item_operations stat_data_ops = {
53345 .bytes_number = sd_bytes_number,
53346 .decrement_key = sd_decrement_key,
53347 .is_left_mergeable = sd_is_left_mergeable,
53348@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
53349 vi->vi_index, vi->vi_type, vi->vi_ih);
53350 }
53351
53352-static struct item_operations direct_ops = {
53353+static const struct item_operations direct_ops = {
53354 .bytes_number = direct_bytes_number,
53355 .decrement_key = direct_decrement_key,
53356 .is_left_mergeable = direct_is_left_mergeable,
53357@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
53358 vi->vi_index, vi->vi_type, vi->vi_ih);
53359 }
53360
53361-static struct item_operations indirect_ops = {
53362+static const struct item_operations indirect_ops = {
53363 .bytes_number = indirect_bytes_number,
53364 .decrement_key = indirect_decrement_key,
53365 .is_left_mergeable = indirect_is_left_mergeable,
53366@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
53367 printk("\n");
53368 }
53369
53370-static struct item_operations direntry_ops = {
53371+static const struct item_operations direntry_ops = {
53372 .bytes_number = direntry_bytes_number,
53373 .decrement_key = direntry_decrement_key,
53374 .is_left_mergeable = direntry_is_left_mergeable,
53375@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
53376 "Invalid item type observed, run fsck ASAP");
53377 }
53378
53379-static struct item_operations errcatch_ops = {
53380+static const struct item_operations errcatch_ops = {
53381 errcatch_bytes_number,
53382 errcatch_decrement_key,
53383 errcatch_is_left_mergeable,
53384@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
53385 #error Item types must use disk-format assigned values.
53386 #endif
53387
53388-struct item_operations *item_ops[TYPE_ANY + 1] = {
53389+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
53390 &stat_data_ops,
53391 &indirect_ops,
53392 &direct_ops,
53393diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
53394index b5fe0aa..e0e25c4 100644
53395--- a/fs/reiserfs/journal.c
53396+++ b/fs/reiserfs/journal.c
53397@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
53398 struct buffer_head *bh;
53399 int i, j;
53400
53401+ pax_track_stack();
53402+
53403 bh = __getblk(dev, block, bufsize);
53404 if (buffer_uptodate(bh))
53405 return (bh);
53406diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
53407index 2715791..b8996db 100644
53408--- a/fs/reiserfs/namei.c
53409+++ b/fs/reiserfs/namei.c
53410@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
53411 unsigned long savelink = 1;
53412 struct timespec ctime;
53413
53414+ pax_track_stack();
53415+
53416 /* three balancings: (1) old name removal, (2) new name insertion
53417 and (3) maybe "save" link insertion
53418 stat data updates: (1) old directory,
53419diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
53420index 9229e55..3d2e3b7 100644
53421--- a/fs/reiserfs/procfs.c
53422+++ b/fs/reiserfs/procfs.c
53423@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
53424 "SMALL_TAILS " : "NO_TAILS ",
53425 replay_only(sb) ? "REPLAY_ONLY " : "",
53426 convert_reiserfs(sb) ? "CONV " : "",
53427- atomic_read(&r->s_generation_counter),
53428+ atomic_read_unchecked(&r->s_generation_counter),
53429 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
53430 SF(s_do_balance), SF(s_unneeded_left_neighbor),
53431 SF(s_good_search_by_key_reada), SF(s_bmaps),
53432@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
53433 struct journal_params *jp = &rs->s_v1.s_journal;
53434 char b[BDEVNAME_SIZE];
53435
53436+ pax_track_stack();
53437+
53438 seq_printf(m, /* on-disk fields */
53439 "jp_journal_1st_block: \t%i\n"
53440 "jp_journal_dev: \t%s[%x]\n"
53441diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
53442index d036ee5..4c7dca1 100644
53443--- a/fs/reiserfs/stree.c
53444+++ b/fs/reiserfs/stree.c
53445@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
53446 int iter = 0;
53447 #endif
53448
53449+ pax_track_stack();
53450+
53451 BUG_ON(!th->t_trans_id);
53452
53453 init_tb_struct(th, &s_del_balance, sb, path,
53454@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
53455 int retval;
53456 int quota_cut_bytes = 0;
53457
53458+ pax_track_stack();
53459+
53460 BUG_ON(!th->t_trans_id);
53461
53462 le_key2cpu_key(&cpu_key, key);
53463@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
53464 int quota_cut_bytes;
53465 loff_t tail_pos = 0;
53466
53467+ pax_track_stack();
53468+
53469 BUG_ON(!th->t_trans_id);
53470
53471 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
53472@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
53473 int retval;
53474 int fs_gen;
53475
53476+ pax_track_stack();
53477+
53478 BUG_ON(!th->t_trans_id);
53479
53480 fs_gen = get_generation(inode->i_sb);
53481@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
53482 int fs_gen = 0;
53483 int quota_bytes = 0;
53484
53485+ pax_track_stack();
53486+
53487 BUG_ON(!th->t_trans_id);
53488
53489 if (inode) { /* Do we count quotas for item? */
53490diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
53491index f0ad05f..af3306f 100644
53492--- a/fs/reiserfs/super.c
53493+++ b/fs/reiserfs/super.c
53494@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
53495 {.option_name = NULL}
53496 };
53497
53498+ pax_track_stack();
53499+
53500 *blocks = 0;
53501 if (!options || !*options)
53502 /* use default configuration: create tails, journaling on, no
53503diff --git a/fs/select.c b/fs/select.c
53504index fd38ce2..f5381b8 100644
53505--- a/fs/select.c
53506+++ b/fs/select.c
53507@@ -20,6 +20,7 @@
53508 #include <linux/module.h>
53509 #include <linux/slab.h>
53510 #include <linux/poll.h>
53511+#include <linux/security.h>
53512 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
53513 #include <linux/file.h>
53514 #include <linux/fdtable.h>
53515@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
53516 int retval, i, timed_out = 0;
53517 unsigned long slack = 0;
53518
53519+ pax_track_stack();
53520+
53521 rcu_read_lock();
53522 retval = max_select_fd(n, fds);
53523 rcu_read_unlock();
53524@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
53525 /* Allocate small arguments on the stack to save memory and be faster */
53526 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
53527
53528+ pax_track_stack();
53529+
53530 ret = -EINVAL;
53531 if (n < 0)
53532 goto out_nofds;
53533@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
53534 struct poll_list *walk = head;
53535 unsigned long todo = nfds;
53536
53537+ pax_track_stack();
53538+
53539+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
53540 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
53541 return -EINVAL;
53542
53543diff --git a/fs/seq_file.c b/fs/seq_file.c
53544index eae7d9d..679f099 100644
53545--- a/fs/seq_file.c
53546+++ b/fs/seq_file.c
53547@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53548 return 0;
53549 }
53550 if (!m->buf) {
53551- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53552+ m->size = PAGE_SIZE;
53553+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53554 if (!m->buf)
53555 return -ENOMEM;
53556 }
53557@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53558 Eoverflow:
53559 m->op->stop(m, p);
53560 kfree(m->buf);
53561- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53562+ m->size <<= 1;
53563+ m->buf = kmalloc(m->size, GFP_KERNEL);
53564 return !m->buf ? -ENOMEM : -EAGAIN;
53565 }
53566
53567@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53568 m->version = file->f_version;
53569 /* grab buffer if we didn't have one */
53570 if (!m->buf) {
53571- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53572+ m->size = PAGE_SIZE;
53573+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53574 if (!m->buf)
53575 goto Enomem;
53576 }
53577@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53578 goto Fill;
53579 m->op->stop(m, p);
53580 kfree(m->buf);
53581- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53582+ m->size <<= 1;
53583+ m->buf = kmalloc(m->size, GFP_KERNEL);
53584 if (!m->buf)
53585 goto Enomem;
53586 m->count = 0;
53587@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
53588 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
53589 void *data)
53590 {
53591- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
53592+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
53593 int res = -ENOMEM;
53594
53595 if (op) {
53596diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
53597index 71c29b6..54694dd 100644
53598--- a/fs/smbfs/proc.c
53599+++ b/fs/smbfs/proc.c
53600@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
53601
53602 out:
53603 if (server->local_nls != NULL && server->remote_nls != NULL)
53604- server->ops->convert = convert_cp;
53605+ *(void **)&server->ops->convert = convert_cp;
53606 else
53607- server->ops->convert = convert_memcpy;
53608+ *(void **)&server->ops->convert = convert_memcpy;
53609
53610 smb_unlock_server(server);
53611 return n;
53612@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
53613
53614 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
53615 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
53616- server->ops->getattr = smb_proc_getattr_core;
53617+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
53618 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
53619- server->ops->getattr = smb_proc_getattr_ff;
53620+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
53621 }
53622
53623 /* Decode server capabilities */
53624@@ -3439,7 +3439,7 @@ out:
53625 static void
53626 install_ops(struct smb_ops *dst, struct smb_ops *src)
53627 {
53628- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53629+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53630 }
53631
53632 /* < LANMAN2 */
53633diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
53634index 00b2909..2ace383 100644
53635--- a/fs/smbfs/symlink.c
53636+++ b/fs/smbfs/symlink.c
53637@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
53638
53639 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53640 {
53641- char *s = nd_get_link(nd);
53642+ const char *s = nd_get_link(nd);
53643 if (!IS_ERR(s))
53644 __putname(s);
53645 }
53646diff --git a/fs/splice.c b/fs/splice.c
53647index bb92b7c..5aa72b0 100644
53648--- a/fs/splice.c
53649+++ b/fs/splice.c
53650@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53651 pipe_lock(pipe);
53652
53653 for (;;) {
53654- if (!pipe->readers) {
53655+ if (!atomic_read(&pipe->readers)) {
53656 send_sig(SIGPIPE, current, 0);
53657 if (!ret)
53658 ret = -EPIPE;
53659@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53660 do_wakeup = 0;
53661 }
53662
53663- pipe->waiting_writers++;
53664+ atomic_inc(&pipe->waiting_writers);
53665 pipe_wait(pipe);
53666- pipe->waiting_writers--;
53667+ atomic_dec(&pipe->waiting_writers);
53668 }
53669
53670 pipe_unlock(pipe);
53671@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
53672 .spd_release = spd_release_page,
53673 };
53674
53675+ pax_track_stack();
53676+
53677 index = *ppos >> PAGE_CACHE_SHIFT;
53678 loff = *ppos & ~PAGE_CACHE_MASK;
53679 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
53680@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
53681 old_fs = get_fs();
53682 set_fs(get_ds());
53683 /* The cast to a user pointer is valid due to the set_fs() */
53684- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
53685+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
53686 set_fs(old_fs);
53687
53688 return res;
53689@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
53690 old_fs = get_fs();
53691 set_fs(get_ds());
53692 /* The cast to a user pointer is valid due to the set_fs() */
53693- res = vfs_write(file, (const char __user *)buf, count, &pos);
53694+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
53695 set_fs(old_fs);
53696
53697 return res;
53698@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
53699 .spd_release = spd_release_page,
53700 };
53701
53702+ pax_track_stack();
53703+
53704 index = *ppos >> PAGE_CACHE_SHIFT;
53705 offset = *ppos & ~PAGE_CACHE_MASK;
53706 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
53707@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
53708 goto err;
53709
53710 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
53711- vec[i].iov_base = (void __user *) page_address(page);
53712+ vec[i].iov_base = (__force void __user *) page_address(page);
53713 vec[i].iov_len = this_len;
53714 pages[i] = page;
53715 spd.nr_pages++;
53716@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
53717 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
53718 {
53719 while (!pipe->nrbufs) {
53720- if (!pipe->writers)
53721+ if (!atomic_read(&pipe->writers))
53722 return 0;
53723
53724- if (!pipe->waiting_writers && sd->num_spliced)
53725+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
53726 return 0;
53727
53728 if (sd->flags & SPLICE_F_NONBLOCK)
53729@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
53730 * out of the pipe right after the splice_to_pipe(). So set
53731 * PIPE_READERS appropriately.
53732 */
53733- pipe->readers = 1;
53734+ atomic_set(&pipe->readers, 1);
53735
53736 current->splice_pipe = pipe;
53737 }
53738@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
53739 .spd_release = spd_release_page,
53740 };
53741
53742+ pax_track_stack();
53743+
53744 pipe = pipe_info(file->f_path.dentry->d_inode);
53745 if (!pipe)
53746 return -EBADF;
53747@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53748 ret = -ERESTARTSYS;
53749 break;
53750 }
53751- if (!pipe->writers)
53752+ if (!atomic_read(&pipe->writers))
53753 break;
53754- if (!pipe->waiting_writers) {
53755+ if (!atomic_read(&pipe->waiting_writers)) {
53756 if (flags & SPLICE_F_NONBLOCK) {
53757 ret = -EAGAIN;
53758 break;
53759@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53760 pipe_lock(pipe);
53761
53762 while (pipe->nrbufs >= PIPE_BUFFERS) {
53763- if (!pipe->readers) {
53764+ if (!atomic_read(&pipe->readers)) {
53765 send_sig(SIGPIPE, current, 0);
53766 ret = -EPIPE;
53767 break;
53768@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53769 ret = -ERESTARTSYS;
53770 break;
53771 }
53772- pipe->waiting_writers++;
53773+ atomic_inc(&pipe->waiting_writers);
53774 pipe_wait(pipe);
53775- pipe->waiting_writers--;
53776+ atomic_dec(&pipe->waiting_writers);
53777 }
53778
53779 pipe_unlock(pipe);
53780@@ -1786,14 +1792,14 @@ retry:
53781 pipe_double_lock(ipipe, opipe);
53782
53783 do {
53784- if (!opipe->readers) {
53785+ if (!atomic_read(&opipe->readers)) {
53786 send_sig(SIGPIPE, current, 0);
53787 if (!ret)
53788 ret = -EPIPE;
53789 break;
53790 }
53791
53792- if (!ipipe->nrbufs && !ipipe->writers)
53793+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
53794 break;
53795
53796 /*
53797@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53798 pipe_double_lock(ipipe, opipe);
53799
53800 do {
53801- if (!opipe->readers) {
53802+ if (!atomic_read(&opipe->readers)) {
53803 send_sig(SIGPIPE, current, 0);
53804 if (!ret)
53805 ret = -EPIPE;
53806@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53807 * return EAGAIN if we have the potential of some data in the
53808 * future, otherwise just return 0
53809 */
53810- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
53811+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
53812 ret = -EAGAIN;
53813
53814 pipe_unlock(ipipe);
53815diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
53816index 7118a38..70af853 100644
53817--- a/fs/sysfs/file.c
53818+++ b/fs/sysfs/file.c
53819@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
53820
53821 struct sysfs_open_dirent {
53822 atomic_t refcnt;
53823- atomic_t event;
53824+ atomic_unchecked_t event;
53825 wait_queue_head_t poll;
53826 struct list_head buffers; /* goes through sysfs_buffer.list */
53827 };
53828@@ -53,7 +53,7 @@ struct sysfs_buffer {
53829 size_t count;
53830 loff_t pos;
53831 char * page;
53832- struct sysfs_ops * ops;
53833+ const struct sysfs_ops * ops;
53834 struct mutex mutex;
53835 int needs_read_fill;
53836 int event;
53837@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
53838 {
53839 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
53840 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53841- struct sysfs_ops * ops = buffer->ops;
53842+ const struct sysfs_ops * ops = buffer->ops;
53843 int ret = 0;
53844 ssize_t count;
53845
53846@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
53847 if (!sysfs_get_active_two(attr_sd))
53848 return -ENODEV;
53849
53850- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
53851+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
53852 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
53853
53854 sysfs_put_active_two(attr_sd);
53855@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
53856 {
53857 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
53858 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53859- struct sysfs_ops * ops = buffer->ops;
53860+ const struct sysfs_ops * ops = buffer->ops;
53861 int rc;
53862
53863 /* need attr_sd for attr and ops, its parent for kobj */
53864@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
53865 return -ENOMEM;
53866
53867 atomic_set(&new_od->refcnt, 0);
53868- atomic_set(&new_od->event, 1);
53869+ atomic_set_unchecked(&new_od->event, 1);
53870 init_waitqueue_head(&new_od->poll);
53871 INIT_LIST_HEAD(&new_od->buffers);
53872 goto retry;
53873@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
53874 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
53875 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53876 struct sysfs_buffer *buffer;
53877- struct sysfs_ops *ops;
53878+ const struct sysfs_ops *ops;
53879 int error = -EACCES;
53880 char *p;
53881
53882@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
53883
53884 sysfs_put_active_two(attr_sd);
53885
53886- if (buffer->event != atomic_read(&od->event))
53887+ if (buffer->event != atomic_read_unchecked(&od->event))
53888 goto trigger;
53889
53890 return DEFAULT_POLLMASK;
53891@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
53892
53893 od = sd->s_attr.open;
53894 if (od) {
53895- atomic_inc(&od->event);
53896+ atomic_inc_unchecked(&od->event);
53897 wake_up_interruptible(&od->poll);
53898 }
53899
53900diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
53901index 4974995..c26609c 100644
53902--- a/fs/sysfs/mount.c
53903+++ b/fs/sysfs/mount.c
53904@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
53905 .s_name = "",
53906 .s_count = ATOMIC_INIT(1),
53907 .s_flags = SYSFS_DIR,
53908+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
53909+ .s_mode = S_IFDIR | S_IRWXU,
53910+#else
53911 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
53912+#endif
53913 .s_ino = 1,
53914 };
53915
53916diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
53917index c5081ad..342ea86 100644
53918--- a/fs/sysfs/symlink.c
53919+++ b/fs/sysfs/symlink.c
53920@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
53921
53922 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
53923 {
53924- char *page = nd_get_link(nd);
53925+ const char *page = nd_get_link(nd);
53926 if (!IS_ERR(page))
53927 free_page((unsigned long)page);
53928 }
53929diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
53930index 1e06853..b06d325 100644
53931--- a/fs/udf/balloc.c
53932+++ b/fs/udf/balloc.c
53933@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
53934
53935 mutex_lock(&sbi->s_alloc_mutex);
53936 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
53937- if (bloc->logicalBlockNum < 0 ||
53938- (bloc->logicalBlockNum + count) >
53939- partmap->s_partition_len) {
53940+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
53941 udf_debug("%d < %d || %d + %d > %d\n",
53942 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
53943 count, partmap->s_partition_len);
53944@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
53945
53946 mutex_lock(&sbi->s_alloc_mutex);
53947 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
53948- if (bloc->logicalBlockNum < 0 ||
53949- (bloc->logicalBlockNum + count) >
53950- partmap->s_partition_len) {
53951+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
53952 udf_debug("%d < %d || %d + %d > %d\n",
53953 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
53954 partmap->s_partition_len);
53955diff --git a/fs/udf/inode.c b/fs/udf/inode.c
53956index 6d24c2c..fff470f 100644
53957--- a/fs/udf/inode.c
53958+++ b/fs/udf/inode.c
53959@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
53960 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
53961 int lastblock = 0;
53962
53963+ pax_track_stack();
53964+
53965 prev_epos.offset = udf_file_entry_alloc_offset(inode);
53966 prev_epos.block = iinfo->i_location;
53967 prev_epos.bh = NULL;
53968diff --git a/fs/udf/misc.c b/fs/udf/misc.c
53969index 9215700..bf1f68e 100644
53970--- a/fs/udf/misc.c
53971+++ b/fs/udf/misc.c
53972@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
53973
53974 u8 udf_tag_checksum(const struct tag *t)
53975 {
53976- u8 *data = (u8 *)t;
53977+ const u8 *data = (const u8 *)t;
53978 u8 checksum = 0;
53979 int i;
53980 for (i = 0; i < sizeof(struct tag); ++i)
53981diff --git a/fs/utimes.c b/fs/utimes.c
53982index e4c75db..b4df0e0 100644
53983--- a/fs/utimes.c
53984+++ b/fs/utimes.c
53985@@ -1,6 +1,7 @@
53986 #include <linux/compiler.h>
53987 #include <linux/file.h>
53988 #include <linux/fs.h>
53989+#include <linux/security.h>
53990 #include <linux/linkage.h>
53991 #include <linux/mount.h>
53992 #include <linux/namei.h>
53993@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
53994 goto mnt_drop_write_and_out;
53995 }
53996 }
53997+
53998+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
53999+ error = -EACCES;
54000+ goto mnt_drop_write_and_out;
54001+ }
54002+
54003 mutex_lock(&inode->i_mutex);
54004 error = notify_change(path->dentry, &newattrs);
54005 mutex_unlock(&inode->i_mutex);
54006diff --git a/fs/xattr.c b/fs/xattr.c
54007index 6d4f6d3..cda3958 100644
54008--- a/fs/xattr.c
54009+++ b/fs/xattr.c
54010@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54011 * Extended attribute SET operations
54012 */
54013 static long
54014-setxattr(struct dentry *d, const char __user *name, const void __user *value,
54015+setxattr(struct path *path, const char __user *name, const void __user *value,
54016 size_t size, int flags)
54017 {
54018 int error;
54019@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54020 return PTR_ERR(kvalue);
54021 }
54022
54023- error = vfs_setxattr(d, kname, kvalue, size, flags);
54024+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54025+ error = -EACCES;
54026+ goto out;
54027+ }
54028+
54029+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54030+out:
54031 kfree(kvalue);
54032 return error;
54033 }
54034@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54035 return error;
54036 error = mnt_want_write(path.mnt);
54037 if (!error) {
54038- error = setxattr(path.dentry, name, value, size, flags);
54039+ error = setxattr(&path, name, value, size, flags);
54040 mnt_drop_write(path.mnt);
54041 }
54042 path_put(&path);
54043@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54044 return error;
54045 error = mnt_want_write(path.mnt);
54046 if (!error) {
54047- error = setxattr(path.dentry, name, value, size, flags);
54048+ error = setxattr(&path, name, value, size, flags);
54049 mnt_drop_write(path.mnt);
54050 }
54051 path_put(&path);
54052@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54053 const void __user *,value, size_t, size, int, flags)
54054 {
54055 struct file *f;
54056- struct dentry *dentry;
54057 int error = -EBADF;
54058
54059 f = fget(fd);
54060 if (!f)
54061 return error;
54062- dentry = f->f_path.dentry;
54063- audit_inode(NULL, dentry);
54064+ audit_inode(NULL, f->f_path.dentry);
54065 error = mnt_want_write_file(f);
54066 if (!error) {
54067- error = setxattr(dentry, name, value, size, flags);
54068+ error = setxattr(&f->f_path, name, value, size, flags);
54069 mnt_drop_write(f->f_path.mnt);
54070 }
54071 fput(f);
54072diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54073index c6ad7c7..f2847a7 100644
54074--- a/fs/xattr_acl.c
54075+++ b/fs/xattr_acl.c
54076@@ -17,8 +17,8 @@
54077 struct posix_acl *
54078 posix_acl_from_xattr(const void *value, size_t size)
54079 {
54080- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54081- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54082+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54083+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54084 int count;
54085 struct posix_acl *acl;
54086 struct posix_acl_entry *acl_e;
54087diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54088index 942362f..88f96f5 100644
54089--- a/fs/xfs/linux-2.6/xfs_ioctl.c
54090+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54091@@ -134,7 +134,7 @@ xfs_find_handle(
54092 }
54093
54094 error = -EFAULT;
54095- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54096+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54097 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54098 goto out_put;
54099
54100@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54101 if (IS_ERR(dentry))
54102 return PTR_ERR(dentry);
54103
54104- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54105+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54106 if (!kbuf)
54107 goto out_dput;
54108
54109@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54110 xfs_mount_t *mp,
54111 void __user *arg)
54112 {
54113- xfs_fsop_geom_t fsgeo;
54114+ xfs_fsop_geom_t fsgeo;
54115 int error;
54116
54117 error = xfs_fs_geometry(mp, &fsgeo, 3);
54118diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54119index bad485a..479bd32 100644
54120--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54121+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54122@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54123 xfs_fsop_geom_t fsgeo;
54124 int error;
54125
54126+ memset(&fsgeo, 0, sizeof(fsgeo));
54127 error = xfs_fs_geometry(mp, &fsgeo, 3);
54128 if (error)
54129 return -error;
54130diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54131index 1f3b4b8..6102f6d 100644
54132--- a/fs/xfs/linux-2.6/xfs_iops.c
54133+++ b/fs/xfs/linux-2.6/xfs_iops.c
54134@@ -468,7 +468,7 @@ xfs_vn_put_link(
54135 struct nameidata *nd,
54136 void *p)
54137 {
54138- char *s = nd_get_link(nd);
54139+ const char *s = nd_get_link(nd);
54140
54141 if (!IS_ERR(s))
54142 kfree(s);
54143diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54144index 8971fb0..5fc1eb2 100644
54145--- a/fs/xfs/xfs_bmap.c
54146+++ b/fs/xfs/xfs_bmap.c
54147@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54148 int nmap,
54149 int ret_nmap);
54150 #else
54151-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54152+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54153 #endif /* DEBUG */
54154
54155 #if defined(XFS_RW_TRACE)
54156diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54157index e89734e..5e84d8d 100644
54158--- a/fs/xfs/xfs_dir2_sf.c
54159+++ b/fs/xfs/xfs_dir2_sf.c
54160@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54161 }
54162
54163 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54164- if (filldir(dirent, sfep->name, sfep->namelen,
54165+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54166+ char name[sfep->namelen];
54167+ memcpy(name, sfep->name, sfep->namelen);
54168+ if (filldir(dirent, name, sfep->namelen,
54169+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
54170+ *offset = off & 0x7fffffff;
54171+ return 0;
54172+ }
54173+ } else if (filldir(dirent, sfep->name, sfep->namelen,
54174 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54175 *offset = off & 0x7fffffff;
54176 return 0;
54177diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54178index 8f32f50..859e8a3 100644
54179--- a/fs/xfs/xfs_vnodeops.c
54180+++ b/fs/xfs/xfs_vnodeops.c
54181@@ -564,13 +564,17 @@ xfs_readlink(
54182
54183 xfs_ilock(ip, XFS_ILOCK_SHARED);
54184
54185- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54186- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54187-
54188 pathlen = ip->i_d.di_size;
54189 if (!pathlen)
54190 goto out;
54191
54192+ if (pathlen > MAXPATHLEN) {
54193+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54194+ __func__, (unsigned long long)ip->i_ino, pathlen);
54195+ ASSERT(0);
54196+ return XFS_ERROR(EFSCORRUPTED);
54197+ }
54198+
54199 if (ip->i_df.if_flags & XFS_IFINLINE) {
54200 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54201 link[pathlen] = '\0';
54202diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54203new file mode 100644
54204index 0000000..f27a8e8
54205--- /dev/null
54206+++ b/grsecurity/Kconfig
54207@@ -0,0 +1,1036 @@
54208+#
54209+# grecurity configuration
54210+#
54211+
54212+menu "Grsecurity"
54213+
54214+config GRKERNSEC
54215+ bool "Grsecurity"
54216+ select CRYPTO
54217+ select CRYPTO_SHA256
54218+ help
54219+ If you say Y here, you will be able to configure many features
54220+ that will enhance the security of your system. It is highly
54221+ recommended that you say Y here and read through the help
54222+ for each option so that you fully understand the features and
54223+ can evaluate their usefulness for your machine.
54224+
54225+choice
54226+ prompt "Security Level"
54227+ depends on GRKERNSEC
54228+ default GRKERNSEC_CUSTOM
54229+
54230+config GRKERNSEC_LOW
54231+ bool "Low"
54232+ select GRKERNSEC_LINK
54233+ select GRKERNSEC_FIFO
54234+ select GRKERNSEC_RANDNET
54235+ select GRKERNSEC_DMESG
54236+ select GRKERNSEC_CHROOT
54237+ select GRKERNSEC_CHROOT_CHDIR
54238+
54239+ help
54240+ If you choose this option, several of the grsecurity options will
54241+ be enabled that will give you greater protection against a number
54242+ of attacks, while assuring that none of your software will have any
54243+ conflicts with the additional security measures. If you run a lot
54244+ of unusual software, or you are having problems with the higher
54245+ security levels, you should say Y here. With this option, the
54246+ following features are enabled:
54247+
54248+ - Linking restrictions
54249+ - FIFO restrictions
54250+ - Restricted dmesg
54251+ - Enforced chdir("/") on chroot
54252+ - Runtime module disabling
54253+
54254+config GRKERNSEC_MEDIUM
54255+ bool "Medium"
54256+ select PAX
54257+ select PAX_EI_PAX
54258+ select PAX_PT_PAX_FLAGS
54259+ select PAX_HAVE_ACL_FLAGS
54260+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54261+ select GRKERNSEC_CHROOT
54262+ select GRKERNSEC_CHROOT_SYSCTL
54263+ select GRKERNSEC_LINK
54264+ select GRKERNSEC_FIFO
54265+ select GRKERNSEC_DMESG
54266+ select GRKERNSEC_RANDNET
54267+ select GRKERNSEC_FORKFAIL
54268+ select GRKERNSEC_TIME
54269+ select GRKERNSEC_SIGNAL
54270+ select GRKERNSEC_CHROOT
54271+ select GRKERNSEC_CHROOT_UNIX
54272+ select GRKERNSEC_CHROOT_MOUNT
54273+ select GRKERNSEC_CHROOT_PIVOT
54274+ select GRKERNSEC_CHROOT_DOUBLE
54275+ select GRKERNSEC_CHROOT_CHDIR
54276+ select GRKERNSEC_CHROOT_MKNOD
54277+ select GRKERNSEC_PROC
54278+ select GRKERNSEC_PROC_USERGROUP
54279+ select PAX_RANDUSTACK
54280+ select PAX_ASLR
54281+ select PAX_RANDMMAP
54282+ select PAX_REFCOUNT if (X86 || SPARC64)
54283+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54284+
54285+ help
54286+ If you say Y here, several features in addition to those included
54287+ in the low additional security level will be enabled. These
54288+ features provide even more security to your system, though in rare
54289+ cases they may be incompatible with very old or poorly written
54290+ software. If you enable this option, make sure that your auth
54291+ service (identd) is running as gid 1001. With this option,
54292+ the following features (in addition to those provided in the
54293+ low additional security level) will be enabled:
54294+
54295+ - Failed fork logging
54296+ - Time change logging
54297+ - Signal logging
54298+ - Deny mounts in chroot
54299+ - Deny double chrooting
54300+ - Deny sysctl writes in chroot
54301+ - Deny mknod in chroot
54302+ - Deny access to abstract AF_UNIX sockets out of chroot
54303+ - Deny pivot_root in chroot
54304+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
54305+ - /proc restrictions with special GID set to 10 (usually wheel)
54306+ - Address Space Layout Randomization (ASLR)
54307+ - Prevent exploitation of most refcount overflows
54308+ - Bounds checking of copying between the kernel and userland
54309+
54310+config GRKERNSEC_HIGH
54311+ bool "High"
54312+ select GRKERNSEC_LINK
54313+ select GRKERNSEC_FIFO
54314+ select GRKERNSEC_DMESG
54315+ select GRKERNSEC_FORKFAIL
54316+ select GRKERNSEC_TIME
54317+ select GRKERNSEC_SIGNAL
54318+ select GRKERNSEC_CHROOT
54319+ select GRKERNSEC_CHROOT_SHMAT
54320+ select GRKERNSEC_CHROOT_UNIX
54321+ select GRKERNSEC_CHROOT_MOUNT
54322+ select GRKERNSEC_CHROOT_FCHDIR
54323+ select GRKERNSEC_CHROOT_PIVOT
54324+ select GRKERNSEC_CHROOT_DOUBLE
54325+ select GRKERNSEC_CHROOT_CHDIR
54326+ select GRKERNSEC_CHROOT_MKNOD
54327+ select GRKERNSEC_CHROOT_CAPS
54328+ select GRKERNSEC_CHROOT_SYSCTL
54329+ select GRKERNSEC_CHROOT_FINDTASK
54330+ select GRKERNSEC_SYSFS_RESTRICT
54331+ select GRKERNSEC_PROC
54332+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54333+ select GRKERNSEC_HIDESYM
54334+ select GRKERNSEC_BRUTE
54335+ select GRKERNSEC_PROC_USERGROUP
54336+ select GRKERNSEC_KMEM
54337+ select GRKERNSEC_RESLOG
54338+ select GRKERNSEC_RANDNET
54339+ select GRKERNSEC_PROC_ADD
54340+ select GRKERNSEC_CHROOT_CHMOD
54341+ select GRKERNSEC_CHROOT_NICE
54342+ select GRKERNSEC_AUDIT_MOUNT
54343+ select GRKERNSEC_MODHARDEN if (MODULES)
54344+ select GRKERNSEC_HARDEN_PTRACE
54345+ select GRKERNSEC_VM86 if (X86_32)
54346+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54347+ select PAX
54348+ select PAX_RANDUSTACK
54349+ select PAX_ASLR
54350+ select PAX_RANDMMAP
54351+ select PAX_NOEXEC
54352+ select PAX_MPROTECT
54353+ select PAX_EI_PAX
54354+ select PAX_PT_PAX_FLAGS
54355+ select PAX_HAVE_ACL_FLAGS
54356+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54357+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
54358+ select PAX_RANDKSTACK if (X86_TSC && X86)
54359+ select PAX_SEGMEXEC if (X86_32)
54360+ select PAX_PAGEEXEC
54361+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54362+ select PAX_EMUTRAMP if (PARISC)
54363+ select PAX_EMUSIGRT if (PARISC)
54364+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54365+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54366+ select PAX_REFCOUNT if (X86 || SPARC64)
54367+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54368+ help
54369+ If you say Y here, many of the features of grsecurity will be
54370+ enabled, which will protect you against many kinds of attacks
54371+ against your system. The heightened security comes at a cost
54372+ of an increased chance of incompatibilities with rare software
54373+ on your machine. Since this security level enables PaX, you should
54374+ view <http://pax.grsecurity.net> and read about the PaX
54375+ project. While you are there, download chpax and run it on
54376+ binaries that cause problems with PaX. Also remember that
54377+ since the /proc restrictions are enabled, you must run your
54378+ identd as gid 1001. This security level enables the following
54379+ features in addition to those listed in the low and medium
54380+ security levels:
54381+
54382+ - Additional /proc restrictions
54383+ - Chmod restrictions in chroot
54384+ - No signals, ptrace, or viewing of processes outside of chroot
54385+ - Capability restrictions in chroot
54386+ - Deny fchdir out of chroot
54387+ - Priority restrictions in chroot
54388+ - Segmentation-based implementation of PaX
54389+ - Mprotect restrictions
54390+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54391+ - Kernel stack randomization
54392+ - Mount/unmount/remount logging
54393+ - Kernel symbol hiding
54394+ - Hardening of module auto-loading
54395+ - Ptrace restrictions
54396+ - Restricted vm86 mode
54397+ - Restricted sysfs/debugfs
54398+ - Active kernel exploit response
54399+
54400+config GRKERNSEC_CUSTOM
54401+ bool "Custom"
54402+ help
54403+ If you say Y here, you will be able to configure every grsecurity
54404+ option, which allows you to enable many more features that aren't
54405+ covered in the basic security levels. These additional features
54406+ include TPE, socket restrictions, and the sysctl system for
54407+ grsecurity. It is advised that you read through the help for
54408+ each option to determine its usefulness in your situation.
54409+
54410+endchoice
54411+
54412+menu "Address Space Protection"
54413+depends on GRKERNSEC
54414+
54415+config GRKERNSEC_KMEM
54416+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
54417+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
54418+ help
54419+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
54420+ be written to or read from to modify or leak the contents of the running
54421+ kernel. /dev/port will also not be allowed to be opened. If you have module
54422+ support disabled, enabling this will close up four ways that are
54423+ currently used to insert malicious code into the running kernel.
54424+ Even with all these features enabled, we still highly recommend that
54425+ you use the RBAC system, as it is still possible for an attacker to
54426+ modify the running kernel through privileged I/O granted by ioperm/iopl.
54427+ If you are not using XFree86, you may be able to stop this additional
54428+ case by enabling the 'Disable privileged I/O' option. Though nothing
54429+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
54430+ but only to video memory, which is the only writing we allow in this
54431+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
54432+ not be allowed to mprotect it with PROT_WRITE later.
54433+ It is highly recommended that you say Y here if you meet all the
54434+ conditions above.
54435+
54436+config GRKERNSEC_VM86
54437+ bool "Restrict VM86 mode"
54438+ depends on X86_32
54439+
54440+ help
54441+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
54442+ make use of a special execution mode on 32bit x86 processors called
54443+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
54444+ video cards and will still work with this option enabled. The purpose
54445+ of the option is to prevent exploitation of emulation errors in
54446+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
54447+ Nearly all users should be able to enable this option.
54448+
54449+config GRKERNSEC_IO
54450+ bool "Disable privileged I/O"
54451+ depends on X86
54452+ select RTC_CLASS
54453+ select RTC_INTF_DEV
54454+ select RTC_DRV_CMOS
54455+
54456+ help
54457+ If you say Y here, all ioperm and iopl calls will return an error.
54458+ Ioperm and iopl can be used to modify the running kernel.
54459+ Unfortunately, some programs need this access to operate properly,
54460+ the most notable of which are XFree86 and hwclock. hwclock can be
54461+ remedied by having RTC support in the kernel, so real-time
54462+ clock support is enabled if this option is enabled, to ensure
54463+ that hwclock operates correctly. XFree86 still will not
54464+ operate correctly with this option enabled, so DO NOT CHOOSE Y
54465+ IF YOU USE XFree86. If you use XFree86 and you still want to
54466+ protect your kernel against modification, use the RBAC system.
54467+
54468+config GRKERNSEC_PROC_MEMMAP
54469+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
54470+ default y if (PAX_NOEXEC || PAX_ASLR)
54471+ depends on PAX_NOEXEC || PAX_ASLR
54472+ help
54473+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
54474+ give no information about the addresses of its mappings if
54475+ PaX features that rely on random addresses are enabled on the task.
54476+ If you use PaX it is greatly recommended that you say Y here as it
54477+ closes up a hole that makes the full ASLR useless for suid
54478+ binaries.
54479+
54480+config GRKERNSEC_BRUTE
54481+ bool "Deter exploit bruteforcing"
54482+ help
54483+ If you say Y here, attempts to bruteforce exploits against forking
54484+ daemons such as apache or sshd, as well as against suid/sgid binaries
54485+ will be deterred. When a child of a forking daemon is killed by PaX
54486+ or crashes due to an illegal instruction or other suspicious signal,
54487+ the parent process will be delayed 30 seconds upon every subsequent
54488+ fork until the administrator is able to assess the situation and
54489+ restart the daemon.
54490+ In the suid/sgid case, the attempt is logged, the user has all their
54491+ processes terminated, and they are prevented from executing any further
54492+ processes for 15 minutes.
54493+ It is recommended that you also enable signal logging in the auditing
54494+ section so that logs are generated when a process triggers a suspicious
54495+ signal.
54496+ If the sysctl option is enabled, a sysctl option with name
54497+ "deter_bruteforce" is created.
54498+
54499+config GRKERNSEC_MODHARDEN
54500+ bool "Harden module auto-loading"
54501+ depends on MODULES
54502+ help
54503+ If you say Y here, module auto-loading in response to use of some
54504+ feature implemented by an unloaded module will be restricted to
54505+ root users. Enabling this option helps defend against attacks
54506+ by unprivileged users who abuse the auto-loading behavior to
54507+ cause a vulnerable module to load that is then exploited.
54508+
54509+ If this option prevents a legitimate use of auto-loading for a
54510+ non-root user, the administrator can execute modprobe manually
54511+ with the exact name of the module mentioned in the alert log.
54512+ Alternatively, the administrator can add the module to the list
54513+ of modules loaded at boot by modifying init scripts.
54514+
54515+ Modification of init scripts will most likely be needed on
54516+ Ubuntu servers with encrypted home directory support enabled,
54517+ as the first non-root user logging in will cause the ecb(aes),
54518+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
54519+
54520+config GRKERNSEC_HIDESYM
54521+ bool "Hide kernel symbols"
54522+ help
54523+ If you say Y here, getting information on loaded modules, and
54524+ displaying all kernel symbols through a syscall will be restricted
54525+ to users with CAP_SYS_MODULE. For software compatibility reasons,
54526+ /proc/kallsyms will be restricted to the root user. The RBAC
54527+ system can hide that entry even from root.
54528+
54529+ This option also prevents leaking of kernel addresses through
54530+ several /proc entries.
54531+
54532+ Note that this option is only effective provided the following
54533+ conditions are met:
54534+ 1) The kernel using grsecurity is not precompiled by some distribution
54535+ 2) You have also enabled GRKERNSEC_DMESG
54536+ 3) You are using the RBAC system and hiding other files such as your
54537+ kernel image and System.map. Alternatively, enabling this option
54538+ causes the permissions on /boot, /lib/modules, and the kernel
54539+ source directory to change at compile time to prevent
54540+ reading by non-root users.
54541+ If the above conditions are met, this option will aid in providing a
54542+ useful protection against local kernel exploitation of overflows
54543+ and arbitrary read/write vulnerabilities.
54544+
54545+config GRKERNSEC_KERN_LOCKOUT
54546+ bool "Active kernel exploit response"
54547+ depends on X86 || ARM || PPC || SPARC
54548+ help
54549+ If you say Y here, when a PaX alert is triggered due to suspicious
54550+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
54551+ or an OOPs occurs due to bad memory accesses, instead of just
54552+ terminating the offending process (and potentially allowing
54553+ a subsequent exploit from the same user), we will take one of two
54554+ actions:
54555+ If the user was root, we will panic the system
54556+ If the user was non-root, we will log the attempt, terminate
54557+ all processes owned by the user, then prevent them from creating
54558+ any new processes until the system is restarted
54559+ This deters repeated kernel exploitation/bruteforcing attempts
54560+ and is useful for later forensics.
54561+
54562+endmenu
54563+menu "Role Based Access Control Options"
54564+depends on GRKERNSEC
54565+
54566+config GRKERNSEC_RBAC_DEBUG
54567+ bool
54568+
54569+config GRKERNSEC_NO_RBAC
54570+ bool "Disable RBAC system"
54571+ help
54572+ If you say Y here, the /dev/grsec device will be removed from the kernel,
54573+ preventing the RBAC system from being enabled. You should only say Y
54574+ here if you have no intention of using the RBAC system, so as to prevent
54575+ an attacker with root access from misusing the RBAC system to hide files
54576+ and processes when loadable module support and /dev/[k]mem have been
54577+ locked down.
54578+
54579+config GRKERNSEC_ACL_HIDEKERN
54580+ bool "Hide kernel processes"
54581+ help
54582+ If you say Y here, all kernel threads will be hidden to all
54583+ processes but those whose subject has the "view hidden processes"
54584+ flag.
54585+
54586+config GRKERNSEC_ACL_MAXTRIES
54587+ int "Maximum tries before password lockout"
54588+ default 3
54589+ help
54590+ This option enforces the maximum number of times a user can attempt
54591+ to authorize themselves with the grsecurity RBAC system before being
54592+ denied the ability to attempt authorization again for a specified time.
54593+ The lower the number, the harder it will be to brute-force a password.
54594+
54595+config GRKERNSEC_ACL_TIMEOUT
54596+ int "Time to wait after max password tries, in seconds"
54597+ default 30
54598+ help
54599+ This option specifies the time the user must wait after attempting to
54600+ authorize to the RBAC system with the maximum number of invalid
54601+ passwords. The higher the number, the harder it will be to brute-force
54602+ a password.
54603+
54604+endmenu
54605+menu "Filesystem Protections"
54606+depends on GRKERNSEC
54607+
54608+config GRKERNSEC_PROC
54609+ bool "Proc restrictions"
54610+ help
54611+ If you say Y here, the permissions of the /proc filesystem
54612+ will be altered to enhance system security and privacy. You MUST
54613+ choose either a user only restriction or a user and group restriction.
54614+ Depending upon the option you choose, you can either restrict users to
54615+ see only the processes they themselves run, or choose a group that can
54616+ view all processes and files normally restricted to root if you choose
54617+ the "restrict to user only" option. NOTE: If you're running identd as
54618+ a non-root user, you will have to run it as the group you specify here.
54619+
54620+config GRKERNSEC_PROC_USER
54621+ bool "Restrict /proc to user only"
54622+ depends on GRKERNSEC_PROC
54623+ help
54624+ If you say Y here, non-root users will only be able to view their own
54625+ processes, and restricts them from viewing network-related information,
54626+ and viewing kernel symbol and module information.
54627+
54628+config GRKERNSEC_PROC_USERGROUP
54629+ bool "Allow special group"
54630+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
54631+ help
54632+ If you say Y here, you will be able to select a group that will be
54633+ able to view all processes and network-related information. If you've
54634+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
54635+ remain hidden. This option is useful if you want to run identd as
54636+ a non-root user.
54637+
54638+config GRKERNSEC_PROC_GID
54639+ int "GID for special group"
54640+ depends on GRKERNSEC_PROC_USERGROUP
54641+ default 1001
54642+
54643+config GRKERNSEC_PROC_ADD
54644+ bool "Additional restrictions"
54645+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
54646+ help
54647+ If you say Y here, additional restrictions will be placed on
54648+ /proc that keep normal users from viewing device information and
54649+ slabinfo information that could be useful for exploits.
54650+
54651+config GRKERNSEC_LINK
54652+ bool "Linking restrictions"
54653+ help
54654+ If you say Y here, /tmp race exploits will be prevented, since users
54655+ will no longer be able to follow symlinks owned by other users in
54656+ world-writable +t directories (e.g. /tmp), unless the owner of the
54657+ symlink is the owner of the directory. Users will also not be
54658+ able to hardlink to files they do not own. If the sysctl option is
54659+ enabled, a sysctl option with name "linking_restrictions" is created.
54660+
54661+config GRKERNSEC_FIFO
54662+ bool "FIFO restrictions"
54663+ help
54664+ If you say Y here, users will not be able to write to FIFOs they don't
54665+ own in world-writable +t directories (e.g. /tmp), unless the owner of
54666+ the FIFO is the same owner of the directory it's held in. If the sysctl
54667+ option is enabled, a sysctl option with name "fifo_restrictions" is
54668+ created.
54669+
54670+config GRKERNSEC_SYSFS_RESTRICT
54671+ bool "Sysfs/debugfs restriction"
54672+ depends on SYSFS
54673+ help
54674+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
54675+ any filesystem normally mounted under it (e.g. debugfs) will only
54676+ be accessible by root. These filesystems generally provide access
54677+ to hardware and debug information that isn't appropriate for unprivileged
54678+ users of the system. Sysfs and debugfs have also become a large source
54679+ of new vulnerabilities, ranging from infoleaks to local compromise.
54680+ There has been very little oversight with an eye toward security involved
54681+ in adding new exporters of information to these filesystems, so their
54682+ use is discouraged.
54683+ This option is equivalent to a chmod 0700 of the mount paths.
54684+
54685+config GRKERNSEC_ROFS
54686+ bool "Runtime read-only mount protection"
54687+ help
54688+ If you say Y here, a sysctl option with name "romount_protect" will
54689+ be created. By setting this option to 1 at runtime, filesystems
54690+ will be protected in the following ways:
54691+ * No new writable mounts will be allowed
54692+ * Existing read-only mounts won't be able to be remounted read/write
54693+ * Write operations will be denied on all block devices
54694+ This option acts independently of grsec_lock: once it is set to 1,
54695+ it cannot be turned off. Therefore, please be mindful of the resulting
54696+ behavior if this option is enabled in an init script on a read-only
54697+ filesystem. This feature is mainly intended for secure embedded systems.
54698+
54699+config GRKERNSEC_CHROOT
54700+ bool "Chroot jail restrictions"
54701+ help
54702+ If you say Y here, you will be able to choose several options that will
54703+ make breaking out of a chrooted jail much more difficult. If you
54704+ encounter no software incompatibilities with the following options, it
54705+ is recommended that you enable each one.
54706+
54707+config GRKERNSEC_CHROOT_MOUNT
54708+ bool "Deny mounts"
54709+ depends on GRKERNSEC_CHROOT
54710+ help
54711+ If you say Y here, processes inside a chroot will not be able to
54712+ mount or remount filesystems. If the sysctl option is enabled, a
54713+ sysctl option with name "chroot_deny_mount" is created.
54714+
54715+config GRKERNSEC_CHROOT_DOUBLE
54716+ bool "Deny double-chroots"
54717+ depends on GRKERNSEC_CHROOT
54718+ help
54719+ If you say Y here, processes inside a chroot will not be able to chroot
54720+ again outside the chroot. This is a widely used method of breaking
54721+ out of a chroot jail and should not be allowed. If the sysctl
54722+ option is enabled, a sysctl option with name
54723+ "chroot_deny_chroot" is created.
54724+
54725+config GRKERNSEC_CHROOT_PIVOT
54726+ bool "Deny pivot_root in chroot"
54727+ depends on GRKERNSEC_CHROOT
54728+ help
54729+ If you say Y here, processes inside a chroot will not be able to use
54730+ a function called pivot_root() that was introduced in Linux 2.3.41. It
54731+ works similar to chroot in that it changes the root filesystem. This
54732+ function could be misused in a chrooted process to attempt to break out
54733+ of the chroot, and therefore should not be allowed. If the sysctl
54734+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
54735+ created.
54736+
54737+config GRKERNSEC_CHROOT_CHDIR
54738+ bool "Enforce chdir(\"/\") on all chroots"
54739+ depends on GRKERNSEC_CHROOT
54740+ help
54741+ If you say Y here, the current working directory of all newly-chrooted
54742+ applications will be set to the root directory of the chroot.
54743+ The man page on chroot(2) states:
54744+ Note that this call does not change the current working
54745+ directory, so that `.' can be outside the tree rooted at
54746+ `/'. In particular, the super-user can escape from a
54747+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
54748+
54749+ It is recommended that you say Y here, since it's not known to break
54750+ any software. If the sysctl option is enabled, a sysctl option with
54751+ name "chroot_enforce_chdir" is created.
54752+
54753+config GRKERNSEC_CHROOT_CHMOD
54754+ bool "Deny (f)chmod +s"
54755+ depends on GRKERNSEC_CHROOT
54756+ help
54757+ If you say Y here, processes inside a chroot will not be able to chmod
54758+ or fchmod files to make them have suid or sgid bits. This protects
54759+ against another published method of breaking a chroot. If the sysctl
54760+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
54761+ created.
54762+
54763+config GRKERNSEC_CHROOT_FCHDIR
54764+ bool "Deny fchdir out of chroot"
54765+ depends on GRKERNSEC_CHROOT
54766+ help
54767+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
54768+ to a file descriptor of the chrooting process that points to a directory
54769+ outside the filesystem will be stopped. If the sysctl option
54770+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
54771+
54772+config GRKERNSEC_CHROOT_MKNOD
54773+ bool "Deny mknod"
54774+ depends on GRKERNSEC_CHROOT
54775+ help
54776+ If you say Y here, processes inside a chroot will not be allowed to
54777+ mknod. The problem with using mknod inside a chroot is that it
54778+ would allow an attacker to create a device entry that is the same
54779+ as one on the physical root of your system, which could range from
54780+ anything from the console device to a device for your harddrive (which
54781+ they could then use to wipe the drive or steal data). It is recommended
54782+ that you say Y here, unless you run into software incompatibilities.
54783+ If the sysctl option is enabled, a sysctl option with name
54784+ "chroot_deny_mknod" is created.
54785+
54786+config GRKERNSEC_CHROOT_SHMAT
54787+ bool "Deny shmat() out of chroot"
54788+ depends on GRKERNSEC_CHROOT
54789+ help
54790+ If you say Y here, processes inside a chroot will not be able to attach
54791+ to shared memory segments that were created outside of the chroot jail.
54792+ It is recommended that you say Y here. If the sysctl option is enabled,
54793+ a sysctl option with name "chroot_deny_shmat" is created.
54794+
54795+config GRKERNSEC_CHROOT_UNIX
54796+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
54797+ depends on GRKERNSEC_CHROOT
54798+ help
54799+ If you say Y here, processes inside a chroot will not be able to
54800+ connect to abstract (meaning not belonging to a filesystem) Unix
54801+ domain sockets that were bound outside of a chroot. It is recommended
54802+ that you say Y here. If the sysctl option is enabled, a sysctl option
54803+ with name "chroot_deny_unix" is created.
54804+
54805+config GRKERNSEC_CHROOT_FINDTASK
54806+ bool "Protect outside processes"
54807+ depends on GRKERNSEC_CHROOT
54808+ help
54809+ If you say Y here, processes inside a chroot will not be able to
54810+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
54811+ getsid, or view any process outside of the chroot. If the sysctl
54812+ option is enabled, a sysctl option with name "chroot_findtask" is
54813+ created.
54814+
54815+config GRKERNSEC_CHROOT_NICE
54816+ bool "Restrict priority changes"
54817+ depends on GRKERNSEC_CHROOT
54818+ help
54819+ If you say Y here, processes inside a chroot will not be able to raise
54820+ the priority of processes in the chroot, or alter the priority of
54821+ processes outside the chroot. This provides more security than simply
54822+ removing CAP_SYS_NICE from the process' capability set. If the
54823+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
54824+ is created.
54825+
54826+config GRKERNSEC_CHROOT_SYSCTL
54827+ bool "Deny sysctl writes"
54828+ depends on GRKERNSEC_CHROOT
54829+ help
54830+ If you say Y here, an attacker in a chroot will not be able to
54831+ write to sysctl entries, either by sysctl(2) or through a /proc
54832+ interface. It is strongly recommended that you say Y here. If the
54833+ sysctl option is enabled, a sysctl option with name
54834+ "chroot_deny_sysctl" is created.
54835+
54836+config GRKERNSEC_CHROOT_CAPS
54837+ bool "Capability restrictions"
54838+ depends on GRKERNSEC_CHROOT
54839+ help
54840+ If you say Y here, the capabilities on all processes within a
54841+ chroot jail will be lowered to stop module insertion, raw i/o,
54842+ system and net admin tasks, rebooting the system, modifying immutable
54843+ files, modifying IPC owned by another, and changing the system time.
54844+ This is left an option because it can break some apps. Disable this
54845+ if your chrooted apps are having problems performing those kinds of
54846+ tasks. If the sysctl option is enabled, a sysctl option with
54847+ name "chroot_caps" is created.
54848+
54849+endmenu
54850+menu "Kernel Auditing"
54851+depends on GRKERNSEC
54852+
54853+config GRKERNSEC_AUDIT_GROUP
54854+ bool "Single group for auditing"
54855+ help
54856+ If you say Y here, the exec, chdir, and (un)mount logging features
54857+ will only operate on a group you specify. This option is recommended
54858+ if you only want to watch certain users instead of having a large
54859+ amount of logs from the entire system. If the sysctl option is enabled,
54860+ a sysctl option with name "audit_group" is created.
54861+
54862+config GRKERNSEC_AUDIT_GID
54863+ int "GID for auditing"
54864+ depends on GRKERNSEC_AUDIT_GROUP
54865+ default 1007
54866+
54867+config GRKERNSEC_EXECLOG
54868+ bool "Exec logging"
54869+ help
54870+ If you say Y here, all execve() calls will be logged (since the
54871+ other exec*() calls are frontends to execve(), all execution
54872+ will be logged). Useful for shell-servers that like to keep track
54873+ of their users. If the sysctl option is enabled, a sysctl option with
54874+ name "exec_logging" is created.
54875+ WARNING: This option when enabled will produce a LOT of logs, especially
54876+ on an active system.
54877+
54878+config GRKERNSEC_RESLOG
54879+ bool "Resource logging"
54880+ help
54881+ If you say Y here, all attempts to overstep resource limits will
54882+ be logged with the resource name, the requested size, and the current
54883+ limit. It is highly recommended that you say Y here. If the sysctl
54884+ option is enabled, a sysctl option with name "resource_logging" is
54885+ created. If the RBAC system is enabled, the sysctl value is ignored.
54886+
54887+config GRKERNSEC_CHROOT_EXECLOG
54888+ bool "Log execs within chroot"
54889+ help
54890+ If you say Y here, all executions inside a chroot jail will be logged
54891+ to syslog. This can cause a large amount of logs if certain
54892+ applications (eg. djb's daemontools) are installed on the system, and
54893+ is therefore left as an option. If the sysctl option is enabled, a
54894+ sysctl option with name "chroot_execlog" is created.
54895+
54896+config GRKERNSEC_AUDIT_PTRACE
54897+ bool "Ptrace logging"
54898+ help
54899+ If you say Y here, all attempts to attach to a process via ptrace
54900+ will be logged. If the sysctl option is enabled, a sysctl option
54901+ with name "audit_ptrace" is created.
54902+
54903+config GRKERNSEC_AUDIT_CHDIR
54904+ bool "Chdir logging"
54905+ help
54906+ If you say Y here, all chdir() calls will be logged. If the sysctl
54907+ option is enabled, a sysctl option with name "audit_chdir" is created.
54908+
54909+config GRKERNSEC_AUDIT_MOUNT
54910+ bool "(Un)Mount logging"
54911+ help
54912+ If you say Y here, all mounts and unmounts will be logged. If the
54913+ sysctl option is enabled, a sysctl option with name "audit_mount" is
54914+ created.
54915+
54916+config GRKERNSEC_SIGNAL
54917+ bool "Signal logging"
54918+ help
54919+ If you say Y here, certain important signals will be logged, such as
54920+ SIGSEGV, which will as a result inform you of when an error in a program
54921+ occurred, which in some cases could mean a possible exploit attempt.
54922+ If the sysctl option is enabled, a sysctl option with name
54923+ "signal_logging" is created.
54924+
54925+config GRKERNSEC_FORKFAIL
54926+ bool "Fork failure logging"
54927+ help
54928+ If you say Y here, all failed fork() attempts will be logged.
54929+ This could suggest a fork bomb, or someone attempting to overstep
54930+ their process limit. If the sysctl option is enabled, a sysctl option
54931+ with name "forkfail_logging" is created.
54932+
54933+config GRKERNSEC_TIME
54934+ bool "Time change logging"
54935+ help
54936+ If you say Y here, any changes of the system clock will be logged.
54937+ If the sysctl option is enabled, a sysctl option with name
54938+ "timechange_logging" is created.
54939+
54940+config GRKERNSEC_PROC_IPADDR
54941+ bool "/proc/<pid>/ipaddr support"
54942+ help
54943+ If you say Y here, a new entry will be added to each /proc/<pid>
54944+ directory that contains the IP address of the person using the task.
54945+ The IP is carried across local TCP and AF_UNIX stream sockets.
54946+ This information can be useful for IDS/IPSes to perform remote response
54947+ to a local attack. The entry is readable by only the owner of the
54948+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
54949+ the RBAC system), and thus does not create privacy concerns.
54950+
54951+config GRKERNSEC_RWXMAP_LOG
54952+ bool 'Denied RWX mmap/mprotect logging'
54953+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
54954+ help
54955+ If you say Y here, calls to mmap() and mprotect() with explicit
54956+ usage of PROT_WRITE and PROT_EXEC together will be logged when
54957+ denied by the PAX_MPROTECT feature. If the sysctl option is
54958+ enabled, a sysctl option with name "rwxmap_logging" is created.
54959+
54960+config GRKERNSEC_AUDIT_TEXTREL
54961+ bool 'ELF text relocations logging (READ HELP)'
54962+ depends on PAX_MPROTECT
54963+ help
54964+ If you say Y here, text relocations will be logged with the filename
54965+ of the offending library or binary. The purpose of the feature is
54966+ to help Linux distribution developers get rid of libraries and
54967+ binaries that need text relocations which hinder the future progress
54968+ of PaX. Only Linux distribution developers should say Y here, and
54969+ never on a production machine, as this option creates an information
54970+ leak that could aid an attacker in defeating the randomization of
54971+ a single memory region. If the sysctl option is enabled, a sysctl
54972+ option with name "audit_textrel" is created.
54973+
54974+endmenu
54975+
54976+menu "Executable Protections"
54977+depends on GRKERNSEC
54978+
54979+config GRKERNSEC_DMESG
54980+ bool "Dmesg(8) restriction"
54981+ help
54982+ If you say Y here, non-root users will not be able to use dmesg(8)
54983+ to view up to the last 4kb of messages in the kernel's log buffer.
54984+ The kernel's log buffer often contains kernel addresses and other
54985+ identifying information useful to an attacker in fingerprinting a
54986+ system for a targeted exploit.
54987+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
54988+ created.
54989+
54990+config GRKERNSEC_HARDEN_PTRACE
54991+ bool "Deter ptrace-based process snooping"
54992+ help
54993+ If you say Y here, TTY sniffers and other malicious monitoring
54994+ programs implemented through ptrace will be defeated. If you
54995+ have been using the RBAC system, this option has already been
54996+ enabled for several years for all users, with the ability to make
54997+ fine-grained exceptions.
54998+
54999+ This option only affects the ability of non-root users to ptrace
55000+ processes that are not a descendent of the ptracing process.
55001+ This means that strace ./binary and gdb ./binary will still work,
55002+ but attaching to arbitrary processes will not. If the sysctl
55003+ option is enabled, a sysctl option with name "harden_ptrace" is
55004+ created.
55005+
55006+config GRKERNSEC_TPE
55007+ bool "Trusted Path Execution (TPE)"
55008+ help
55009+ If you say Y here, you will be able to choose a gid to add to the
55010+ supplementary groups of users you want to mark as "untrusted."
55011+ These users will not be able to execute any files that are not in
55012+ root-owned directories writable only by root. If the sysctl option
55013+ is enabled, a sysctl option with name "tpe" is created.
55014+
55015+config GRKERNSEC_TPE_ALL
55016+ bool "Partially restrict all non-root users"
55017+ depends on GRKERNSEC_TPE
55018+ help
55019+ If you say Y here, all non-root users will be covered under
55020+ a weaker TPE restriction. This is separate from, and in addition to,
55021+ the main TPE options that you have selected elsewhere. Thus, if a
55022+ "trusted" GID is chosen, this restriction applies to even that GID.
55023+ Under this restriction, all non-root users will only be allowed to
55024+ execute files in directories they own that are not group or
55025+ world-writable, or in directories owned by root and writable only by
55026+ root. If the sysctl option is enabled, a sysctl option with name
55027+ "tpe_restrict_all" is created.
55028+
55029+config GRKERNSEC_TPE_INVERT
55030+ bool "Invert GID option"
55031+ depends on GRKERNSEC_TPE
55032+ help
55033+ If you say Y here, the group you specify in the TPE configuration will
55034+ decide what group TPE restrictions will be *disabled* for. This
55035+ option is useful if you want TPE restrictions to be applied to most
55036+ users on the system. If the sysctl option is enabled, a sysctl option
55037+ with name "tpe_invert" is created. Unlike other sysctl options, this
55038+ entry will default to on for backward-compatibility.
55039+
55040+config GRKERNSEC_TPE_GID
55041+ int "GID for untrusted users"
55042+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55043+ default 1005
55044+ help
55045+ Setting this GID determines what group TPE restrictions will be
55046+ *enabled* for. If the sysctl option is enabled, a sysctl option
55047+ with name "tpe_gid" is created.
55048+
55049+config GRKERNSEC_TPE_GID
55050+ int "GID for trusted users"
55051+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55052+ default 1005
55053+ help
55054+ Setting this GID determines what group TPE restrictions will be
55055+ *disabled* for. If the sysctl option is enabled, a sysctl option
55056+ with name "tpe_gid" is created.
55057+
55058+endmenu
55059+menu "Network Protections"
55060+depends on GRKERNSEC
55061+
55062+config GRKERNSEC_RANDNET
55063+ bool "Larger entropy pools"
55064+ help
55065+ If you say Y here, the entropy pools used for many features of Linux
55066+ and grsecurity will be doubled in size. Since several grsecurity
55067+ features use additional randomness, it is recommended that you say Y
55068+ here. Saying Y here has a similar effect as modifying
55069+ /proc/sys/kernel/random/poolsize.
55070+
55071+config GRKERNSEC_BLACKHOLE
55072+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55073+ depends on NET
55074+ help
55075+ If you say Y here, neither TCP resets nor ICMP
55076+ destination-unreachable packets will be sent in response to packets
55077+ sent to ports for which no associated listening process exists.
55078+ This feature supports both IPV4 and IPV6 and exempts the
55079+ loopback interface from blackholing. Enabling this feature
55080+ makes a host more resilient to DoS attacks and reduces network
55081+ visibility against scanners.
55082+
55083+ The blackhole feature as-implemented is equivalent to the FreeBSD
55084+ blackhole feature, as it prevents RST responses to all packets, not
55085+ just SYNs. Under most application behavior this causes no
55086+ problems, but applications (like haproxy) may not close certain
55087+ connections in a way that cleanly terminates them on the remote
55088+ end, leaving the remote host in LAST_ACK state. Because of this
55089+ side-effect and to prevent intentional LAST_ACK DoSes, this
55090+ feature also adds automatic mitigation against such attacks.
55091+ The mitigation drastically reduces the amount of time a socket
55092+ can spend in LAST_ACK state. If you're using haproxy and not
55093+ all servers it connects to have this option enabled, consider
55094+ disabling this feature on the haproxy host.
55095+
55096+ If the sysctl option is enabled, two sysctl options with names
55097+ "ip_blackhole" and "lastack_retries" will be created.
55098+ While "ip_blackhole" takes the standard zero/non-zero on/off
55099+ toggle, "lastack_retries" uses the same kinds of values as
55100+ "tcp_retries1" and "tcp_retries2". The default value of 4
55101+ prevents a socket from lasting more than 45 seconds in LAST_ACK
55102+ state.
55103+
55104+config GRKERNSEC_SOCKET
55105+ bool "Socket restrictions"
55106+ depends on NET
55107+ help
55108+ If you say Y here, you will be able to choose from several options.
55109+ If you assign a GID on your system and add it to the supplementary
55110+ groups of users you want to restrict socket access to, this patch
55111+ will perform up to three things, based on the option(s) you choose.
55112+
55113+config GRKERNSEC_SOCKET_ALL
55114+ bool "Deny any sockets to group"
55115+ depends on GRKERNSEC_SOCKET
55116+ help
55117+ If you say Y here, you will be able to choose a GID of whose users will
55118+ be unable to connect to other hosts from your machine or run server
55119+ applications from your machine. If the sysctl option is enabled, a
55120+ sysctl option with name "socket_all" is created.
55121+
55122+config GRKERNSEC_SOCKET_ALL_GID
55123+ int "GID to deny all sockets for"
55124+ depends on GRKERNSEC_SOCKET_ALL
55125+ default 1004
55126+ help
55127+ Here you can choose the GID to disable socket access for. Remember to
55128+ add the users you want socket access disabled for to the GID
55129+ specified here. If the sysctl option is enabled, a sysctl option
55130+ with name "socket_all_gid" is created.
55131+
55132+config GRKERNSEC_SOCKET_CLIENT
55133+ bool "Deny client sockets to group"
55134+ depends on GRKERNSEC_SOCKET
55135+ help
55136+ If you say Y here, you will be able to choose a GID of whose users will
55137+ be unable to connect to other hosts from your machine, but will be
55138+ able to run servers. If this option is enabled, all users in the group
55139+ you specify will have to use passive mode when initiating ftp transfers
55140+ from the shell on your machine. If the sysctl option is enabled, a
55141+ sysctl option with name "socket_client" is created.
55142+
55143+config GRKERNSEC_SOCKET_CLIENT_GID
55144+ int "GID to deny client sockets for"
55145+ depends on GRKERNSEC_SOCKET_CLIENT
55146+ default 1003
55147+ help
55148+ Here you can choose the GID to disable client socket access for.
55149+ Remember to add the users you want client socket access disabled for to
55150+ the GID specified here. If the sysctl option is enabled, a sysctl
55151+ option with name "socket_client_gid" is created.
55152+
55153+config GRKERNSEC_SOCKET_SERVER
55154+ bool "Deny server sockets to group"
55155+ depends on GRKERNSEC_SOCKET
55156+ help
55157+ If you say Y here, you will be able to choose a GID of whose users will
55158+ be unable to run server applications from your machine. If the sysctl
55159+ option is enabled, a sysctl option with name "socket_server" is created.
55160+
55161+config GRKERNSEC_SOCKET_SERVER_GID
55162+ int "GID to deny server sockets for"
55163+ depends on GRKERNSEC_SOCKET_SERVER
55164+ default 1002
55165+ help
55166+ Here you can choose the GID to disable server socket access for.
55167+ Remember to add the users you want server socket access disabled for to
55168+ the GID specified here. If the sysctl option is enabled, a sysctl
55169+ option with name "socket_server_gid" is created.
55170+
55171+endmenu
55172+menu "Sysctl support"
55173+depends on GRKERNSEC && SYSCTL
55174+
55175+config GRKERNSEC_SYSCTL
55176+ bool "Sysctl support"
55177+ help
55178+ If you say Y here, you will be able to change the options that
55179+ grsecurity runs with at bootup, without having to recompile your
55180+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55181+ to enable (1) or disable (0) various features. All the sysctl entries
55182+ are mutable until the "grsec_lock" entry is set to a non-zero value.
55183+ All features enabled in the kernel configuration are disabled at boot
55184+ if you do not say Y to the "Turn on features by default" option.
55185+ All options should be set at startup, and the grsec_lock entry should
55186+ be set to a non-zero value after all the options are set.
55187+ *THIS IS EXTREMELY IMPORTANT*
55188+
55189+config GRKERNSEC_SYSCTL_DISTRO
55190+ bool "Extra sysctl support for distro makers (READ HELP)"
55191+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55192+ help
55193+ If you say Y here, additional sysctl options will be created
55194+ for features that affect processes running as root. Therefore,
55195+ it is critical when using this option that the grsec_lock entry be
55196+ enabled after boot. Only distros with prebuilt kernel packages
55197+ with this option enabled that can ensure grsec_lock is enabled
55198+ after boot should use this option.
55199+ *Failure to set grsec_lock after boot makes all grsec features
55200+ this option covers useless*
55201+
55202+ Currently this option creates the following sysctl entries:
55203+ "Disable Privileged I/O": "disable_priv_io"
55204+
55205+config GRKERNSEC_SYSCTL_ON
55206+ bool "Turn on features by default"
55207+ depends on GRKERNSEC_SYSCTL
55208+ help
55209+ If you say Y here, instead of having all features enabled in the
55210+ kernel configuration disabled at boot time, the features will be
55211+ enabled at boot time. It is recommended you say Y here unless
55212+ there is some reason you would want all sysctl-tunable features to
55213+ be disabled by default. As mentioned elsewhere, it is important
55214+ to enable the grsec_lock entry once you have finished modifying
55215+ the sysctl entries.
55216+
55217+endmenu
55218+menu "Logging Options"
55219+depends on GRKERNSEC
55220+
55221+config GRKERNSEC_FLOODTIME
55222+ int "Seconds in between log messages (minimum)"
55223+ default 10
55224+ help
55225+ This option allows you to enforce the number of seconds between
55226+ grsecurity log messages. The default should be suitable for most
55227+ people, however, if you choose to change it, choose a value small enough
55228+ to allow informative logs to be produced, but large enough to
55229+ prevent flooding.
55230+
55231+config GRKERNSEC_FLOODBURST
55232+ int "Number of messages in a burst (maximum)"
55233+ default 6
55234+ help
55235+ This option allows you to choose the maximum number of messages allowed
55236+ within the flood time interval you chose in a separate option. The
55237+ default should be suitable for most people, however if you find that
55238+ many of your logs are being interpreted as flooding, you may want to
55239+ raise this value.
55240+
55241+endmenu
55242+
55243+endmenu
55244diff --git a/grsecurity/Makefile b/grsecurity/Makefile
55245new file mode 100644
55246index 0000000..be9ae3a
55247--- /dev/null
55248+++ b/grsecurity/Makefile
55249@@ -0,0 +1,36 @@
55250+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55251+# during 2001-2009 it has been completely redesigned by Brad Spengler
55252+# into an RBAC system
55253+#
55254+# All code in this directory and various hooks inserted throughout the kernel
55255+# are copyright Brad Spengler - Open Source Security, Inc., and released
55256+# under the GPL v2 or higher
55257+
55258+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55259+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
55260+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55261+
55262+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55263+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55264+ gracl_learn.o grsec_log.o
55265+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55266+
55267+ifdef CONFIG_NET
55268+obj-y += grsec_sock.o
55269+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55270+endif
55271+
55272+ifndef CONFIG_GRKERNSEC
55273+obj-y += grsec_disabled.o
55274+endif
55275+
55276+ifdef CONFIG_GRKERNSEC_HIDESYM
55277+extra-y := grsec_hidesym.o
55278+$(obj)/grsec_hidesym.o:
55279+ @-chmod -f 500 /boot
55280+ @-chmod -f 500 /lib/modules
55281+ @-chmod -f 500 /lib64/modules
55282+ @-chmod -f 500 /lib32/modules
55283+ @-chmod -f 700 .
55284+ @echo ' grsec: protected kernel image paths'
55285+endif
55286diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
55287new file mode 100644
55288index 0000000..6bd68d6
55289--- /dev/null
55290+++ b/grsecurity/gracl.c
55291@@ -0,0 +1,4141 @@
55292+#include <linux/kernel.h>
55293+#include <linux/module.h>
55294+#include <linux/sched.h>
55295+#include <linux/mm.h>
55296+#include <linux/file.h>
55297+#include <linux/fs.h>
55298+#include <linux/namei.h>
55299+#include <linux/mount.h>
55300+#include <linux/tty.h>
55301+#include <linux/proc_fs.h>
55302+#include <linux/smp_lock.h>
55303+#include <linux/slab.h>
55304+#include <linux/vmalloc.h>
55305+#include <linux/types.h>
55306+#include <linux/sysctl.h>
55307+#include <linux/netdevice.h>
55308+#include <linux/ptrace.h>
55309+#include <linux/gracl.h>
55310+#include <linux/gralloc.h>
55311+#include <linux/grsecurity.h>
55312+#include <linux/grinternal.h>
55313+#include <linux/pid_namespace.h>
55314+#include <linux/fdtable.h>
55315+#include <linux/percpu.h>
55316+
55317+#include <asm/uaccess.h>
55318+#include <asm/errno.h>
55319+#include <asm/mman.h>
55320+
55321+static struct acl_role_db acl_role_set;
55322+static struct name_db name_set;
55323+static struct inodev_db inodev_set;
55324+
55325+/* for keeping track of userspace pointers used for subjects, so we
55326+ can share references in the kernel as well
55327+*/
55328+
55329+static struct dentry *real_root;
55330+static struct vfsmount *real_root_mnt;
55331+
55332+static struct acl_subj_map_db subj_map_set;
55333+
55334+static struct acl_role_label *default_role;
55335+
55336+static struct acl_role_label *role_list;
55337+
55338+static u16 acl_sp_role_value;
55339+
55340+extern char *gr_shared_page[4];
55341+static DEFINE_MUTEX(gr_dev_mutex);
55342+DEFINE_RWLOCK(gr_inode_lock);
55343+
55344+struct gr_arg *gr_usermode;
55345+
55346+static unsigned int gr_status __read_only = GR_STATUS_INIT;
55347+
55348+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
55349+extern void gr_clear_learn_entries(void);
55350+
55351+#ifdef CONFIG_GRKERNSEC_RESLOG
55352+extern void gr_log_resource(const struct task_struct *task,
55353+ const int res, const unsigned long wanted, const int gt);
55354+#endif
55355+
55356+unsigned char *gr_system_salt;
55357+unsigned char *gr_system_sum;
55358+
55359+static struct sprole_pw **acl_special_roles = NULL;
55360+static __u16 num_sprole_pws = 0;
55361+
55362+static struct acl_role_label *kernel_role = NULL;
55363+
55364+static unsigned int gr_auth_attempts = 0;
55365+static unsigned long gr_auth_expires = 0UL;
55366+
55367+#ifdef CONFIG_NET
55368+extern struct vfsmount *sock_mnt;
55369+#endif
55370+extern struct vfsmount *pipe_mnt;
55371+extern struct vfsmount *shm_mnt;
55372+#ifdef CONFIG_HUGETLBFS
55373+extern struct vfsmount *hugetlbfs_vfsmount;
55374+#endif
55375+
55376+static struct acl_object_label *fakefs_obj_rw;
55377+static struct acl_object_label *fakefs_obj_rwx;
55378+
55379+extern int gr_init_uidset(void);
55380+extern void gr_free_uidset(void);
55381+extern void gr_remove_uid(uid_t uid);
55382+extern int gr_find_uid(uid_t uid);
55383+
55384+__inline__ int
55385+gr_acl_is_enabled(void)
55386+{
55387+ return (gr_status & GR_READY);
55388+}
55389+
55390+#ifdef CONFIG_BTRFS_FS
55391+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55392+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55393+#endif
55394+
55395+static inline dev_t __get_dev(const struct dentry *dentry)
55396+{
55397+#ifdef CONFIG_BTRFS_FS
55398+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55399+ return get_btrfs_dev_from_inode(dentry->d_inode);
55400+ else
55401+#endif
55402+ return dentry->d_inode->i_sb->s_dev;
55403+}
55404+
55405+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55406+{
55407+ return __get_dev(dentry);
55408+}
55409+
55410+static char gr_task_roletype_to_char(struct task_struct *task)
55411+{
55412+ switch (task->role->roletype &
55413+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
55414+ GR_ROLE_SPECIAL)) {
55415+ case GR_ROLE_DEFAULT:
55416+ return 'D';
55417+ case GR_ROLE_USER:
55418+ return 'U';
55419+ case GR_ROLE_GROUP:
55420+ return 'G';
55421+ case GR_ROLE_SPECIAL:
55422+ return 'S';
55423+ }
55424+
55425+ return 'X';
55426+}
55427+
55428+char gr_roletype_to_char(void)
55429+{
55430+ return gr_task_roletype_to_char(current);
55431+}
55432+
55433+__inline__ int
55434+gr_acl_tpe_check(void)
55435+{
55436+ if (unlikely(!(gr_status & GR_READY)))
55437+ return 0;
55438+ if (current->role->roletype & GR_ROLE_TPE)
55439+ return 1;
55440+ else
55441+ return 0;
55442+}
55443+
55444+int
55445+gr_handle_rawio(const struct inode *inode)
55446+{
55447+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55448+ if (inode && S_ISBLK(inode->i_mode) &&
55449+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55450+ !capable(CAP_SYS_RAWIO))
55451+ return 1;
55452+#endif
55453+ return 0;
55454+}
55455+
55456+static int
55457+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
55458+{
55459+ if (likely(lena != lenb))
55460+ return 0;
55461+
55462+ return !memcmp(a, b, lena);
55463+}
55464+
55465+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
55466+{
55467+ *buflen -= namelen;
55468+ if (*buflen < 0)
55469+ return -ENAMETOOLONG;
55470+ *buffer -= namelen;
55471+ memcpy(*buffer, str, namelen);
55472+ return 0;
55473+}
55474+
55475+/* this must be called with vfsmount_lock and dcache_lock held */
55476+
55477+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55478+ struct dentry *root, struct vfsmount *rootmnt,
55479+ char *buffer, int buflen)
55480+{
55481+ char * end = buffer+buflen;
55482+ char * retval;
55483+ int namelen;
55484+
55485+ *--end = '\0';
55486+ buflen--;
55487+
55488+ if (buflen < 1)
55489+ goto Elong;
55490+ /* Get '/' right */
55491+ retval = end-1;
55492+ *retval = '/';
55493+
55494+ for (;;) {
55495+ struct dentry * parent;
55496+
55497+ if (dentry == root && vfsmnt == rootmnt)
55498+ break;
55499+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
55500+ /* Global root? */
55501+ if (vfsmnt->mnt_parent == vfsmnt)
55502+ goto global_root;
55503+ dentry = vfsmnt->mnt_mountpoint;
55504+ vfsmnt = vfsmnt->mnt_parent;
55505+ continue;
55506+ }
55507+ parent = dentry->d_parent;
55508+ prefetch(parent);
55509+ namelen = dentry->d_name.len;
55510+ buflen -= namelen + 1;
55511+ if (buflen < 0)
55512+ goto Elong;
55513+ end -= namelen;
55514+ memcpy(end, dentry->d_name.name, namelen);
55515+ *--end = '/';
55516+ retval = end;
55517+ dentry = parent;
55518+ }
55519+
55520+out:
55521+ return retval;
55522+
55523+global_root:
55524+ namelen = dentry->d_name.len;
55525+ buflen -= namelen;
55526+ if (buflen < 0)
55527+ goto Elong;
55528+ retval -= namelen-1; /* hit the slash */
55529+ memcpy(retval, dentry->d_name.name, namelen);
55530+ goto out;
55531+Elong:
55532+ retval = ERR_PTR(-ENAMETOOLONG);
55533+ goto out;
55534+}
55535+
55536+static char *
55537+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55538+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
55539+{
55540+ char *retval;
55541+
55542+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
55543+ if (unlikely(IS_ERR(retval)))
55544+ retval = strcpy(buf, "<path too long>");
55545+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
55546+ retval[1] = '\0';
55547+
55548+ return retval;
55549+}
55550+
55551+static char *
55552+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55553+ char *buf, int buflen)
55554+{
55555+ char *res;
55556+
55557+ /* we can use real_root, real_root_mnt, because this is only called
55558+ by the RBAC system */
55559+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
55560+
55561+ return res;
55562+}
55563+
55564+static char *
55565+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55566+ char *buf, int buflen)
55567+{
55568+ char *res;
55569+ struct dentry *root;
55570+ struct vfsmount *rootmnt;
55571+ struct task_struct *reaper = &init_task;
55572+
55573+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
55574+ read_lock(&reaper->fs->lock);
55575+ root = dget(reaper->fs->root.dentry);
55576+ rootmnt = mntget(reaper->fs->root.mnt);
55577+ read_unlock(&reaper->fs->lock);
55578+
55579+ spin_lock(&dcache_lock);
55580+ spin_lock(&vfsmount_lock);
55581+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
55582+ spin_unlock(&vfsmount_lock);
55583+ spin_unlock(&dcache_lock);
55584+
55585+ dput(root);
55586+ mntput(rootmnt);
55587+ return res;
55588+}
55589+
55590+static char *
55591+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55592+{
55593+ char *ret;
55594+ spin_lock(&dcache_lock);
55595+ spin_lock(&vfsmount_lock);
55596+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55597+ PAGE_SIZE);
55598+ spin_unlock(&vfsmount_lock);
55599+ spin_unlock(&dcache_lock);
55600+ return ret;
55601+}
55602+
55603+static char *
55604+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55605+{
55606+ char *ret;
55607+ char *buf;
55608+ int buflen;
55609+
55610+ spin_lock(&dcache_lock);
55611+ spin_lock(&vfsmount_lock);
55612+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
55613+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
55614+ buflen = (int)(ret - buf);
55615+ if (buflen >= 5)
55616+ prepend(&ret, &buflen, "/proc", 5);
55617+ else
55618+ ret = strcpy(buf, "<path too long>");
55619+ spin_unlock(&vfsmount_lock);
55620+ spin_unlock(&dcache_lock);
55621+ return ret;
55622+}
55623+
55624+char *
55625+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
55626+{
55627+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55628+ PAGE_SIZE);
55629+}
55630+
55631+char *
55632+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
55633+{
55634+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55635+ PAGE_SIZE);
55636+}
55637+
55638+char *
55639+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
55640+{
55641+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
55642+ PAGE_SIZE);
55643+}
55644+
55645+char *
55646+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
55647+{
55648+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
55649+ PAGE_SIZE);
55650+}
55651+
55652+char *
55653+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
55654+{
55655+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
55656+ PAGE_SIZE);
55657+}
55658+
55659+__inline__ __u32
55660+to_gr_audit(const __u32 reqmode)
55661+{
55662+ /* masks off auditable permission flags, then shifts them to create
55663+ auditing flags, and adds the special case of append auditing if
55664+ we're requesting write */
55665+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
55666+}
55667+
55668+struct acl_subject_label *
55669+lookup_subject_map(const struct acl_subject_label *userp)
55670+{
55671+ unsigned int index = shash(userp, subj_map_set.s_size);
55672+ struct subject_map *match;
55673+
55674+ match = subj_map_set.s_hash[index];
55675+
55676+ while (match && match->user != userp)
55677+ match = match->next;
55678+
55679+ if (match != NULL)
55680+ return match->kernel;
55681+ else
55682+ return NULL;
55683+}
55684+
55685+static void
55686+insert_subj_map_entry(struct subject_map *subjmap)
55687+{
55688+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
55689+ struct subject_map **curr;
55690+
55691+ subjmap->prev = NULL;
55692+
55693+ curr = &subj_map_set.s_hash[index];
55694+ if (*curr != NULL)
55695+ (*curr)->prev = subjmap;
55696+
55697+ subjmap->next = *curr;
55698+ *curr = subjmap;
55699+
55700+ return;
55701+}
55702+
55703+static struct acl_role_label *
55704+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
55705+ const gid_t gid)
55706+{
55707+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
55708+ struct acl_role_label *match;
55709+ struct role_allowed_ip *ipp;
55710+ unsigned int x;
55711+ u32 curr_ip = task->signal->curr_ip;
55712+
55713+ task->signal->saved_ip = curr_ip;
55714+
55715+ match = acl_role_set.r_hash[index];
55716+
55717+ while (match) {
55718+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
55719+ for (x = 0; x < match->domain_child_num; x++) {
55720+ if (match->domain_children[x] == uid)
55721+ goto found;
55722+ }
55723+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
55724+ break;
55725+ match = match->next;
55726+ }
55727+found:
55728+ if (match == NULL) {
55729+ try_group:
55730+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
55731+ match = acl_role_set.r_hash[index];
55732+
55733+ while (match) {
55734+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
55735+ for (x = 0; x < match->domain_child_num; x++) {
55736+ if (match->domain_children[x] == gid)
55737+ goto found2;
55738+ }
55739+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
55740+ break;
55741+ match = match->next;
55742+ }
55743+found2:
55744+ if (match == NULL)
55745+ match = default_role;
55746+ if (match->allowed_ips == NULL)
55747+ return match;
55748+ else {
55749+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
55750+ if (likely
55751+ ((ntohl(curr_ip) & ipp->netmask) ==
55752+ (ntohl(ipp->addr) & ipp->netmask)))
55753+ return match;
55754+ }
55755+ match = default_role;
55756+ }
55757+ } else if (match->allowed_ips == NULL) {
55758+ return match;
55759+ } else {
55760+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
55761+ if (likely
55762+ ((ntohl(curr_ip) & ipp->netmask) ==
55763+ (ntohl(ipp->addr) & ipp->netmask)))
55764+ return match;
55765+ }
55766+ goto try_group;
55767+ }
55768+
55769+ return match;
55770+}
55771+
55772+struct acl_subject_label *
55773+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
55774+ const struct acl_role_label *role)
55775+{
55776+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
55777+ struct acl_subject_label *match;
55778+
55779+ match = role->subj_hash[index];
55780+
55781+ while (match && (match->inode != ino || match->device != dev ||
55782+ (match->mode & GR_DELETED))) {
55783+ match = match->next;
55784+ }
55785+
55786+ if (match && !(match->mode & GR_DELETED))
55787+ return match;
55788+ else
55789+ return NULL;
55790+}
55791+
55792+struct acl_subject_label *
55793+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
55794+ const struct acl_role_label *role)
55795+{
55796+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
55797+ struct acl_subject_label *match;
55798+
55799+ match = role->subj_hash[index];
55800+
55801+ while (match && (match->inode != ino || match->device != dev ||
55802+ !(match->mode & GR_DELETED))) {
55803+ match = match->next;
55804+ }
55805+
55806+ if (match && (match->mode & GR_DELETED))
55807+ return match;
55808+ else
55809+ return NULL;
55810+}
55811+
55812+static struct acl_object_label *
55813+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
55814+ const struct acl_subject_label *subj)
55815+{
55816+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
55817+ struct acl_object_label *match;
55818+
55819+ match = subj->obj_hash[index];
55820+
55821+ while (match && (match->inode != ino || match->device != dev ||
55822+ (match->mode & GR_DELETED))) {
55823+ match = match->next;
55824+ }
55825+
55826+ if (match && !(match->mode & GR_DELETED))
55827+ return match;
55828+ else
55829+ return NULL;
55830+}
55831+
55832+static struct acl_object_label *
55833+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
55834+ const struct acl_subject_label *subj)
55835+{
55836+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
55837+ struct acl_object_label *match;
55838+
55839+ match = subj->obj_hash[index];
55840+
55841+ while (match && (match->inode != ino || match->device != dev ||
55842+ !(match->mode & GR_DELETED))) {
55843+ match = match->next;
55844+ }
55845+
55846+ if (match && (match->mode & GR_DELETED))
55847+ return match;
55848+
55849+ match = subj->obj_hash[index];
55850+
55851+ while (match && (match->inode != ino || match->device != dev ||
55852+ (match->mode & GR_DELETED))) {
55853+ match = match->next;
55854+ }
55855+
55856+ if (match && !(match->mode & GR_DELETED))
55857+ return match;
55858+ else
55859+ return NULL;
55860+}
55861+
55862+static struct name_entry *
55863+lookup_name_entry(const char *name)
55864+{
55865+ unsigned int len = strlen(name);
55866+ unsigned int key = full_name_hash(name, len);
55867+ unsigned int index = key % name_set.n_size;
55868+ struct name_entry *match;
55869+
55870+ match = name_set.n_hash[index];
55871+
55872+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
55873+ match = match->next;
55874+
55875+ return match;
55876+}
55877+
55878+static struct name_entry *
55879+lookup_name_entry_create(const char *name)
55880+{
55881+ unsigned int len = strlen(name);
55882+ unsigned int key = full_name_hash(name, len);
55883+ unsigned int index = key % name_set.n_size;
55884+ struct name_entry *match;
55885+
55886+ match = name_set.n_hash[index];
55887+
55888+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
55889+ !match->deleted))
55890+ match = match->next;
55891+
55892+ if (match && match->deleted)
55893+ return match;
55894+
55895+ match = name_set.n_hash[index];
55896+
55897+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
55898+ match->deleted))
55899+ match = match->next;
55900+
55901+ if (match && !match->deleted)
55902+ return match;
55903+ else
55904+ return NULL;
55905+}
55906+
55907+static struct inodev_entry *
55908+lookup_inodev_entry(const ino_t ino, const dev_t dev)
55909+{
55910+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
55911+ struct inodev_entry *match;
55912+
55913+ match = inodev_set.i_hash[index];
55914+
55915+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
55916+ match = match->next;
55917+
55918+ return match;
55919+}
55920+
55921+static void
55922+insert_inodev_entry(struct inodev_entry *entry)
55923+{
55924+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
55925+ inodev_set.i_size);
55926+ struct inodev_entry **curr;
55927+
55928+ entry->prev = NULL;
55929+
55930+ curr = &inodev_set.i_hash[index];
55931+ if (*curr != NULL)
55932+ (*curr)->prev = entry;
55933+
55934+ entry->next = *curr;
55935+ *curr = entry;
55936+
55937+ return;
55938+}
55939+
55940+static void
55941+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
55942+{
55943+ unsigned int index =
55944+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
55945+ struct acl_role_label **curr;
55946+ struct acl_role_label *tmp;
55947+
55948+ curr = &acl_role_set.r_hash[index];
55949+
55950+ /* if role was already inserted due to domains and already has
55951+ a role in the same bucket as it attached, then we need to
55952+ combine these two buckets
55953+ */
55954+ if (role->next) {
55955+ tmp = role->next;
55956+ while (tmp->next)
55957+ tmp = tmp->next;
55958+ tmp->next = *curr;
55959+ } else
55960+ role->next = *curr;
55961+ *curr = role;
55962+
55963+ return;
55964+}
55965+
55966+static void
55967+insert_acl_role_label(struct acl_role_label *role)
55968+{
55969+ int i;
55970+
55971+ if (role_list == NULL) {
55972+ role_list = role;
55973+ role->prev = NULL;
55974+ } else {
55975+ role->prev = role_list;
55976+ role_list = role;
55977+ }
55978+
55979+ /* used for hash chains */
55980+ role->next = NULL;
55981+
55982+ if (role->roletype & GR_ROLE_DOMAIN) {
55983+ for (i = 0; i < role->domain_child_num; i++)
55984+ __insert_acl_role_label(role, role->domain_children[i]);
55985+ } else
55986+ __insert_acl_role_label(role, role->uidgid);
55987+}
55988+
55989+static int
55990+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
55991+{
55992+ struct name_entry **curr, *nentry;
55993+ struct inodev_entry *ientry;
55994+ unsigned int len = strlen(name);
55995+ unsigned int key = full_name_hash(name, len);
55996+ unsigned int index = key % name_set.n_size;
55997+
55998+ curr = &name_set.n_hash[index];
55999+
56000+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56001+ curr = &((*curr)->next);
56002+
56003+ if (*curr != NULL)
56004+ return 1;
56005+
56006+ nentry = acl_alloc(sizeof (struct name_entry));
56007+ if (nentry == NULL)
56008+ return 0;
56009+ ientry = acl_alloc(sizeof (struct inodev_entry));
56010+ if (ientry == NULL)
56011+ return 0;
56012+ ientry->nentry = nentry;
56013+
56014+ nentry->key = key;
56015+ nentry->name = name;
56016+ nentry->inode = inode;
56017+ nentry->device = device;
56018+ nentry->len = len;
56019+ nentry->deleted = deleted;
56020+
56021+ nentry->prev = NULL;
56022+ curr = &name_set.n_hash[index];
56023+ if (*curr != NULL)
56024+ (*curr)->prev = nentry;
56025+ nentry->next = *curr;
56026+ *curr = nentry;
56027+
56028+ /* insert us into the table searchable by inode/dev */
56029+ insert_inodev_entry(ientry);
56030+
56031+ return 1;
56032+}
56033+
56034+static void
56035+insert_acl_obj_label(struct acl_object_label *obj,
56036+ struct acl_subject_label *subj)
56037+{
56038+ unsigned int index =
56039+ fhash(obj->inode, obj->device, subj->obj_hash_size);
56040+ struct acl_object_label **curr;
56041+
56042+
56043+ obj->prev = NULL;
56044+
56045+ curr = &subj->obj_hash[index];
56046+ if (*curr != NULL)
56047+ (*curr)->prev = obj;
56048+
56049+ obj->next = *curr;
56050+ *curr = obj;
56051+
56052+ return;
56053+}
56054+
56055+static void
56056+insert_acl_subj_label(struct acl_subject_label *obj,
56057+ struct acl_role_label *role)
56058+{
56059+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56060+ struct acl_subject_label **curr;
56061+
56062+ obj->prev = NULL;
56063+
56064+ curr = &role->subj_hash[index];
56065+ if (*curr != NULL)
56066+ (*curr)->prev = obj;
56067+
56068+ obj->next = *curr;
56069+ *curr = obj;
56070+
56071+ return;
56072+}
56073+
56074+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56075+
56076+static void *
56077+create_table(__u32 * len, int elementsize)
56078+{
56079+ unsigned int table_sizes[] = {
56080+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56081+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56082+ 4194301, 8388593, 16777213, 33554393, 67108859
56083+ };
56084+ void *newtable = NULL;
56085+ unsigned int pwr = 0;
56086+
56087+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56088+ table_sizes[pwr] <= *len)
56089+ pwr++;
56090+
56091+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56092+ return newtable;
56093+
56094+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56095+ newtable =
56096+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56097+ else
56098+ newtable = vmalloc(table_sizes[pwr] * elementsize);
56099+
56100+ *len = table_sizes[pwr];
56101+
56102+ return newtable;
56103+}
56104+
56105+static int
56106+init_variables(const struct gr_arg *arg)
56107+{
56108+ struct task_struct *reaper = &init_task;
56109+ unsigned int stacksize;
56110+
56111+ subj_map_set.s_size = arg->role_db.num_subjects;
56112+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56113+ name_set.n_size = arg->role_db.num_objects;
56114+ inodev_set.i_size = arg->role_db.num_objects;
56115+
56116+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
56117+ !name_set.n_size || !inodev_set.i_size)
56118+ return 1;
56119+
56120+ if (!gr_init_uidset())
56121+ return 1;
56122+
56123+ /* set up the stack that holds allocation info */
56124+
56125+ stacksize = arg->role_db.num_pointers + 5;
56126+
56127+ if (!acl_alloc_stack_init(stacksize))
56128+ return 1;
56129+
56130+ /* grab reference for the real root dentry and vfsmount */
56131+ read_lock(&reaper->fs->lock);
56132+ real_root = dget(reaper->fs->root.dentry);
56133+ real_root_mnt = mntget(reaper->fs->root.mnt);
56134+ read_unlock(&reaper->fs->lock);
56135+
56136+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56137+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56138+#endif
56139+
56140+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56141+ if (fakefs_obj_rw == NULL)
56142+ return 1;
56143+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56144+
56145+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56146+ if (fakefs_obj_rwx == NULL)
56147+ return 1;
56148+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56149+
56150+ subj_map_set.s_hash =
56151+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56152+ acl_role_set.r_hash =
56153+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56154+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56155+ inodev_set.i_hash =
56156+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56157+
56158+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56159+ !name_set.n_hash || !inodev_set.i_hash)
56160+ return 1;
56161+
56162+ memset(subj_map_set.s_hash, 0,
56163+ sizeof(struct subject_map *) * subj_map_set.s_size);
56164+ memset(acl_role_set.r_hash, 0,
56165+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
56166+ memset(name_set.n_hash, 0,
56167+ sizeof (struct name_entry *) * name_set.n_size);
56168+ memset(inodev_set.i_hash, 0,
56169+ sizeof (struct inodev_entry *) * inodev_set.i_size);
56170+
56171+ return 0;
56172+}
56173+
56174+/* free information not needed after startup
56175+ currently contains user->kernel pointer mappings for subjects
56176+*/
56177+
56178+static void
56179+free_init_variables(void)
56180+{
56181+ __u32 i;
56182+
56183+ if (subj_map_set.s_hash) {
56184+ for (i = 0; i < subj_map_set.s_size; i++) {
56185+ if (subj_map_set.s_hash[i]) {
56186+ kfree(subj_map_set.s_hash[i]);
56187+ subj_map_set.s_hash[i] = NULL;
56188+ }
56189+ }
56190+
56191+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56192+ PAGE_SIZE)
56193+ kfree(subj_map_set.s_hash);
56194+ else
56195+ vfree(subj_map_set.s_hash);
56196+ }
56197+
56198+ return;
56199+}
56200+
56201+static void
56202+free_variables(void)
56203+{
56204+ struct acl_subject_label *s;
56205+ struct acl_role_label *r;
56206+ struct task_struct *task, *task2;
56207+ unsigned int x;
56208+
56209+ gr_clear_learn_entries();
56210+
56211+ read_lock(&tasklist_lock);
56212+ do_each_thread(task2, task) {
56213+ task->acl_sp_role = 0;
56214+ task->acl_role_id = 0;
56215+ task->acl = NULL;
56216+ task->role = NULL;
56217+ } while_each_thread(task2, task);
56218+ read_unlock(&tasklist_lock);
56219+
56220+ /* release the reference to the real root dentry and vfsmount */
56221+ if (real_root)
56222+ dput(real_root);
56223+ real_root = NULL;
56224+ if (real_root_mnt)
56225+ mntput(real_root_mnt);
56226+ real_root_mnt = NULL;
56227+
56228+ /* free all object hash tables */
56229+
56230+ FOR_EACH_ROLE_START(r)
56231+ if (r->subj_hash == NULL)
56232+ goto next_role;
56233+ FOR_EACH_SUBJECT_START(r, s, x)
56234+ if (s->obj_hash == NULL)
56235+ break;
56236+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56237+ kfree(s->obj_hash);
56238+ else
56239+ vfree(s->obj_hash);
56240+ FOR_EACH_SUBJECT_END(s, x)
56241+ FOR_EACH_NESTED_SUBJECT_START(r, s)
56242+ if (s->obj_hash == NULL)
56243+ break;
56244+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56245+ kfree(s->obj_hash);
56246+ else
56247+ vfree(s->obj_hash);
56248+ FOR_EACH_NESTED_SUBJECT_END(s)
56249+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
56250+ kfree(r->subj_hash);
56251+ else
56252+ vfree(r->subj_hash);
56253+ r->subj_hash = NULL;
56254+next_role:
56255+ FOR_EACH_ROLE_END(r)
56256+
56257+ acl_free_all();
56258+
56259+ if (acl_role_set.r_hash) {
56260+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
56261+ PAGE_SIZE)
56262+ kfree(acl_role_set.r_hash);
56263+ else
56264+ vfree(acl_role_set.r_hash);
56265+ }
56266+ if (name_set.n_hash) {
56267+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
56268+ PAGE_SIZE)
56269+ kfree(name_set.n_hash);
56270+ else
56271+ vfree(name_set.n_hash);
56272+ }
56273+
56274+ if (inodev_set.i_hash) {
56275+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
56276+ PAGE_SIZE)
56277+ kfree(inodev_set.i_hash);
56278+ else
56279+ vfree(inodev_set.i_hash);
56280+ }
56281+
56282+ gr_free_uidset();
56283+
56284+ memset(&name_set, 0, sizeof (struct name_db));
56285+ memset(&inodev_set, 0, sizeof (struct inodev_db));
56286+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
56287+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
56288+
56289+ default_role = NULL;
56290+ role_list = NULL;
56291+
56292+ return;
56293+}
56294+
56295+static __u32
56296+count_user_objs(struct acl_object_label *userp)
56297+{
56298+ struct acl_object_label o_tmp;
56299+ __u32 num = 0;
56300+
56301+ while (userp) {
56302+ if (copy_from_user(&o_tmp, userp,
56303+ sizeof (struct acl_object_label)))
56304+ break;
56305+
56306+ userp = o_tmp.prev;
56307+ num++;
56308+ }
56309+
56310+ return num;
56311+}
56312+
56313+static struct acl_subject_label *
56314+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
56315+
56316+static int
56317+copy_user_glob(struct acl_object_label *obj)
56318+{
56319+ struct acl_object_label *g_tmp, **guser;
56320+ unsigned int len;
56321+ char *tmp;
56322+
56323+ if (obj->globbed == NULL)
56324+ return 0;
56325+
56326+ guser = &obj->globbed;
56327+ while (*guser) {
56328+ g_tmp = (struct acl_object_label *)
56329+ acl_alloc(sizeof (struct acl_object_label));
56330+ if (g_tmp == NULL)
56331+ return -ENOMEM;
56332+
56333+ if (copy_from_user(g_tmp, *guser,
56334+ sizeof (struct acl_object_label)))
56335+ return -EFAULT;
56336+
56337+ len = strnlen_user(g_tmp->filename, PATH_MAX);
56338+
56339+ if (!len || len >= PATH_MAX)
56340+ return -EINVAL;
56341+
56342+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56343+ return -ENOMEM;
56344+
56345+ if (copy_from_user(tmp, g_tmp->filename, len))
56346+ return -EFAULT;
56347+ tmp[len-1] = '\0';
56348+ g_tmp->filename = tmp;
56349+
56350+ *guser = g_tmp;
56351+ guser = &(g_tmp->next);
56352+ }
56353+
56354+ return 0;
56355+}
56356+
56357+static int
56358+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
56359+ struct acl_role_label *role)
56360+{
56361+ struct acl_object_label *o_tmp;
56362+ unsigned int len;
56363+ int ret;
56364+ char *tmp;
56365+
56366+ while (userp) {
56367+ if ((o_tmp = (struct acl_object_label *)
56368+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
56369+ return -ENOMEM;
56370+
56371+ if (copy_from_user(o_tmp, userp,
56372+ sizeof (struct acl_object_label)))
56373+ return -EFAULT;
56374+
56375+ userp = o_tmp->prev;
56376+
56377+ len = strnlen_user(o_tmp->filename, PATH_MAX);
56378+
56379+ if (!len || len >= PATH_MAX)
56380+ return -EINVAL;
56381+
56382+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56383+ return -ENOMEM;
56384+
56385+ if (copy_from_user(tmp, o_tmp->filename, len))
56386+ return -EFAULT;
56387+ tmp[len-1] = '\0';
56388+ o_tmp->filename = tmp;
56389+
56390+ insert_acl_obj_label(o_tmp, subj);
56391+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
56392+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
56393+ return -ENOMEM;
56394+
56395+ ret = copy_user_glob(o_tmp);
56396+ if (ret)
56397+ return ret;
56398+
56399+ if (o_tmp->nested) {
56400+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
56401+ if (IS_ERR(o_tmp->nested))
56402+ return PTR_ERR(o_tmp->nested);
56403+
56404+ /* insert into nested subject list */
56405+ o_tmp->nested->next = role->hash->first;
56406+ role->hash->first = o_tmp->nested;
56407+ }
56408+ }
56409+
56410+ return 0;
56411+}
56412+
56413+static __u32
56414+count_user_subjs(struct acl_subject_label *userp)
56415+{
56416+ struct acl_subject_label s_tmp;
56417+ __u32 num = 0;
56418+
56419+ while (userp) {
56420+ if (copy_from_user(&s_tmp, userp,
56421+ sizeof (struct acl_subject_label)))
56422+ break;
56423+
56424+ userp = s_tmp.prev;
56425+ /* do not count nested subjects against this count, since
56426+ they are not included in the hash table, but are
56427+ attached to objects. We have already counted
56428+ the subjects in userspace for the allocation
56429+ stack
56430+ */
56431+ if (!(s_tmp.mode & GR_NESTED))
56432+ num++;
56433+ }
56434+
56435+ return num;
56436+}
56437+
56438+static int
56439+copy_user_allowedips(struct acl_role_label *rolep)
56440+{
56441+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
56442+
56443+ ruserip = rolep->allowed_ips;
56444+
56445+ while (ruserip) {
56446+ rlast = rtmp;
56447+
56448+ if ((rtmp = (struct role_allowed_ip *)
56449+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
56450+ return -ENOMEM;
56451+
56452+ if (copy_from_user(rtmp, ruserip,
56453+ sizeof (struct role_allowed_ip)))
56454+ return -EFAULT;
56455+
56456+ ruserip = rtmp->prev;
56457+
56458+ if (!rlast) {
56459+ rtmp->prev = NULL;
56460+ rolep->allowed_ips = rtmp;
56461+ } else {
56462+ rlast->next = rtmp;
56463+ rtmp->prev = rlast;
56464+ }
56465+
56466+ if (!ruserip)
56467+ rtmp->next = NULL;
56468+ }
56469+
56470+ return 0;
56471+}
56472+
56473+static int
56474+copy_user_transitions(struct acl_role_label *rolep)
56475+{
56476+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
56477+
56478+ unsigned int len;
56479+ char *tmp;
56480+
56481+ rusertp = rolep->transitions;
56482+
56483+ while (rusertp) {
56484+ rlast = rtmp;
56485+
56486+ if ((rtmp = (struct role_transition *)
56487+ acl_alloc(sizeof (struct role_transition))) == NULL)
56488+ return -ENOMEM;
56489+
56490+ if (copy_from_user(rtmp, rusertp,
56491+ sizeof (struct role_transition)))
56492+ return -EFAULT;
56493+
56494+ rusertp = rtmp->prev;
56495+
56496+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
56497+
56498+ if (!len || len >= GR_SPROLE_LEN)
56499+ return -EINVAL;
56500+
56501+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56502+ return -ENOMEM;
56503+
56504+ if (copy_from_user(tmp, rtmp->rolename, len))
56505+ return -EFAULT;
56506+ tmp[len-1] = '\0';
56507+ rtmp->rolename = tmp;
56508+
56509+ if (!rlast) {
56510+ rtmp->prev = NULL;
56511+ rolep->transitions = rtmp;
56512+ } else {
56513+ rlast->next = rtmp;
56514+ rtmp->prev = rlast;
56515+ }
56516+
56517+ if (!rusertp)
56518+ rtmp->next = NULL;
56519+ }
56520+
56521+ return 0;
56522+}
56523+
56524+static struct acl_subject_label *
56525+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
56526+{
56527+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
56528+ unsigned int len;
56529+ char *tmp;
56530+ __u32 num_objs;
56531+ struct acl_ip_label **i_tmp, *i_utmp2;
56532+ struct gr_hash_struct ghash;
56533+ struct subject_map *subjmap;
56534+ unsigned int i_num;
56535+ int err;
56536+
56537+ s_tmp = lookup_subject_map(userp);
56538+
56539+ /* we've already copied this subject into the kernel, just return
56540+ the reference to it, and don't copy it over again
56541+ */
56542+ if (s_tmp)
56543+ return(s_tmp);
56544+
56545+ if ((s_tmp = (struct acl_subject_label *)
56546+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
56547+ return ERR_PTR(-ENOMEM);
56548+
56549+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
56550+ if (subjmap == NULL)
56551+ return ERR_PTR(-ENOMEM);
56552+
56553+ subjmap->user = userp;
56554+ subjmap->kernel = s_tmp;
56555+ insert_subj_map_entry(subjmap);
56556+
56557+ if (copy_from_user(s_tmp, userp,
56558+ sizeof (struct acl_subject_label)))
56559+ return ERR_PTR(-EFAULT);
56560+
56561+ len = strnlen_user(s_tmp->filename, PATH_MAX);
56562+
56563+ if (!len || len >= PATH_MAX)
56564+ return ERR_PTR(-EINVAL);
56565+
56566+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56567+ return ERR_PTR(-ENOMEM);
56568+
56569+ if (copy_from_user(tmp, s_tmp->filename, len))
56570+ return ERR_PTR(-EFAULT);
56571+ tmp[len-1] = '\0';
56572+ s_tmp->filename = tmp;
56573+
56574+ if (!strcmp(s_tmp->filename, "/"))
56575+ role->root_label = s_tmp;
56576+
56577+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
56578+ return ERR_PTR(-EFAULT);
56579+
56580+ /* copy user and group transition tables */
56581+
56582+ if (s_tmp->user_trans_num) {
56583+ uid_t *uidlist;
56584+
56585+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
56586+ if (uidlist == NULL)
56587+ return ERR_PTR(-ENOMEM);
56588+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
56589+ return ERR_PTR(-EFAULT);
56590+
56591+ s_tmp->user_transitions = uidlist;
56592+ }
56593+
56594+ if (s_tmp->group_trans_num) {
56595+ gid_t *gidlist;
56596+
56597+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
56598+ if (gidlist == NULL)
56599+ return ERR_PTR(-ENOMEM);
56600+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
56601+ return ERR_PTR(-EFAULT);
56602+
56603+ s_tmp->group_transitions = gidlist;
56604+ }
56605+
56606+ /* set up object hash table */
56607+ num_objs = count_user_objs(ghash.first);
56608+
56609+ s_tmp->obj_hash_size = num_objs;
56610+ s_tmp->obj_hash =
56611+ (struct acl_object_label **)
56612+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
56613+
56614+ if (!s_tmp->obj_hash)
56615+ return ERR_PTR(-ENOMEM);
56616+
56617+ memset(s_tmp->obj_hash, 0,
56618+ s_tmp->obj_hash_size *
56619+ sizeof (struct acl_object_label *));
56620+
56621+ /* add in objects */
56622+ err = copy_user_objs(ghash.first, s_tmp, role);
56623+
56624+ if (err)
56625+ return ERR_PTR(err);
56626+
56627+ /* set pointer for parent subject */
56628+ if (s_tmp->parent_subject) {
56629+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
56630+
56631+ if (IS_ERR(s_tmp2))
56632+ return s_tmp2;
56633+
56634+ s_tmp->parent_subject = s_tmp2;
56635+ }
56636+
56637+ /* add in ip acls */
56638+
56639+ if (!s_tmp->ip_num) {
56640+ s_tmp->ips = NULL;
56641+ goto insert;
56642+ }
56643+
56644+ i_tmp =
56645+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
56646+ sizeof (struct acl_ip_label *));
56647+
56648+ if (!i_tmp)
56649+ return ERR_PTR(-ENOMEM);
56650+
56651+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
56652+ *(i_tmp + i_num) =
56653+ (struct acl_ip_label *)
56654+ acl_alloc(sizeof (struct acl_ip_label));
56655+ if (!*(i_tmp + i_num))
56656+ return ERR_PTR(-ENOMEM);
56657+
56658+ if (copy_from_user
56659+ (&i_utmp2, s_tmp->ips + i_num,
56660+ sizeof (struct acl_ip_label *)))
56661+ return ERR_PTR(-EFAULT);
56662+
56663+ if (copy_from_user
56664+ (*(i_tmp + i_num), i_utmp2,
56665+ sizeof (struct acl_ip_label)))
56666+ return ERR_PTR(-EFAULT);
56667+
56668+ if ((*(i_tmp + i_num))->iface == NULL)
56669+ continue;
56670+
56671+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
56672+ if (!len || len >= IFNAMSIZ)
56673+ return ERR_PTR(-EINVAL);
56674+ tmp = acl_alloc(len);
56675+ if (tmp == NULL)
56676+ return ERR_PTR(-ENOMEM);
56677+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
56678+ return ERR_PTR(-EFAULT);
56679+ (*(i_tmp + i_num))->iface = tmp;
56680+ }
56681+
56682+ s_tmp->ips = i_tmp;
56683+
56684+insert:
56685+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
56686+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
56687+ return ERR_PTR(-ENOMEM);
56688+
56689+ return s_tmp;
56690+}
56691+
56692+static int
56693+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
56694+{
56695+ struct acl_subject_label s_pre;
56696+ struct acl_subject_label * ret;
56697+ int err;
56698+
56699+ while (userp) {
56700+ if (copy_from_user(&s_pre, userp,
56701+ sizeof (struct acl_subject_label)))
56702+ return -EFAULT;
56703+
56704+ /* do not add nested subjects here, add
56705+ while parsing objects
56706+ */
56707+
56708+ if (s_pre.mode & GR_NESTED) {
56709+ userp = s_pre.prev;
56710+ continue;
56711+ }
56712+
56713+ ret = do_copy_user_subj(userp, role);
56714+
56715+ err = PTR_ERR(ret);
56716+ if (IS_ERR(ret))
56717+ return err;
56718+
56719+ insert_acl_subj_label(ret, role);
56720+
56721+ userp = s_pre.prev;
56722+ }
56723+
56724+ return 0;
56725+}
56726+
56727+static int
56728+copy_user_acl(struct gr_arg *arg)
56729+{
56730+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
56731+ struct sprole_pw *sptmp;
56732+ struct gr_hash_struct *ghash;
56733+ uid_t *domainlist;
56734+ unsigned int r_num;
56735+ unsigned int len;
56736+ char *tmp;
56737+ int err = 0;
56738+ __u16 i;
56739+ __u32 num_subjs;
56740+
56741+ /* we need a default and kernel role */
56742+ if (arg->role_db.num_roles < 2)
56743+ return -EINVAL;
56744+
56745+ /* copy special role authentication info from userspace */
56746+
56747+ num_sprole_pws = arg->num_sprole_pws;
56748+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
56749+
56750+ if (!acl_special_roles) {
56751+ err = -ENOMEM;
56752+ goto cleanup;
56753+ }
56754+
56755+ for (i = 0; i < num_sprole_pws; i++) {
56756+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
56757+ if (!sptmp) {
56758+ err = -ENOMEM;
56759+ goto cleanup;
56760+ }
56761+ if (copy_from_user(sptmp, arg->sprole_pws + i,
56762+ sizeof (struct sprole_pw))) {
56763+ err = -EFAULT;
56764+ goto cleanup;
56765+ }
56766+
56767+ len =
56768+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
56769+
56770+ if (!len || len >= GR_SPROLE_LEN) {
56771+ err = -EINVAL;
56772+ goto cleanup;
56773+ }
56774+
56775+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
56776+ err = -ENOMEM;
56777+ goto cleanup;
56778+ }
56779+
56780+ if (copy_from_user(tmp, sptmp->rolename, len)) {
56781+ err = -EFAULT;
56782+ goto cleanup;
56783+ }
56784+ tmp[len-1] = '\0';
56785+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56786+ printk(KERN_ALERT "Copying special role %s\n", tmp);
56787+#endif
56788+ sptmp->rolename = tmp;
56789+ acl_special_roles[i] = sptmp;
56790+ }
56791+
56792+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
56793+
56794+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
56795+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
56796+
56797+ if (!r_tmp) {
56798+ err = -ENOMEM;
56799+ goto cleanup;
56800+ }
56801+
56802+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
56803+ sizeof (struct acl_role_label *))) {
56804+ err = -EFAULT;
56805+ goto cleanup;
56806+ }
56807+
56808+ if (copy_from_user(r_tmp, r_utmp2,
56809+ sizeof (struct acl_role_label))) {
56810+ err = -EFAULT;
56811+ goto cleanup;
56812+ }
56813+
56814+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
56815+
56816+ if (!len || len >= PATH_MAX) {
56817+ err = -EINVAL;
56818+ goto cleanup;
56819+ }
56820+
56821+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
56822+ err = -ENOMEM;
56823+ goto cleanup;
56824+ }
56825+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
56826+ err = -EFAULT;
56827+ goto cleanup;
56828+ }
56829+ tmp[len-1] = '\0';
56830+ r_tmp->rolename = tmp;
56831+
56832+ if (!strcmp(r_tmp->rolename, "default")
56833+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
56834+ default_role = r_tmp;
56835+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
56836+ kernel_role = r_tmp;
56837+ }
56838+
56839+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
56840+ err = -ENOMEM;
56841+ goto cleanup;
56842+ }
56843+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
56844+ err = -EFAULT;
56845+ goto cleanup;
56846+ }
56847+
56848+ r_tmp->hash = ghash;
56849+
56850+ num_subjs = count_user_subjs(r_tmp->hash->first);
56851+
56852+ r_tmp->subj_hash_size = num_subjs;
56853+ r_tmp->subj_hash =
56854+ (struct acl_subject_label **)
56855+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
56856+
56857+ if (!r_tmp->subj_hash) {
56858+ err = -ENOMEM;
56859+ goto cleanup;
56860+ }
56861+
56862+ err = copy_user_allowedips(r_tmp);
56863+ if (err)
56864+ goto cleanup;
56865+
56866+ /* copy domain info */
56867+ if (r_tmp->domain_children != NULL) {
56868+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
56869+ if (domainlist == NULL) {
56870+ err = -ENOMEM;
56871+ goto cleanup;
56872+ }
56873+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
56874+ err = -EFAULT;
56875+ goto cleanup;
56876+ }
56877+ r_tmp->domain_children = domainlist;
56878+ }
56879+
56880+ err = copy_user_transitions(r_tmp);
56881+ if (err)
56882+ goto cleanup;
56883+
56884+ memset(r_tmp->subj_hash, 0,
56885+ r_tmp->subj_hash_size *
56886+ sizeof (struct acl_subject_label *));
56887+
56888+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
56889+
56890+ if (err)
56891+ goto cleanup;
56892+
56893+ /* set nested subject list to null */
56894+ r_tmp->hash->first = NULL;
56895+
56896+ insert_acl_role_label(r_tmp);
56897+ }
56898+
56899+ goto return_err;
56900+ cleanup:
56901+ free_variables();
56902+ return_err:
56903+ return err;
56904+
56905+}
56906+
56907+static int
56908+gracl_init(struct gr_arg *args)
56909+{
56910+ int error = 0;
56911+
56912+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
56913+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
56914+
56915+ if (init_variables(args)) {
56916+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
56917+ error = -ENOMEM;
56918+ free_variables();
56919+ goto out;
56920+ }
56921+
56922+ error = copy_user_acl(args);
56923+ free_init_variables();
56924+ if (error) {
56925+ free_variables();
56926+ goto out;
56927+ }
56928+
56929+ if ((error = gr_set_acls(0))) {
56930+ free_variables();
56931+ goto out;
56932+ }
56933+
56934+ pax_open_kernel();
56935+ gr_status |= GR_READY;
56936+ pax_close_kernel();
56937+
56938+ out:
56939+ return error;
56940+}
56941+
56942+/* derived from glibc fnmatch() 0: match, 1: no match*/
56943+
56944+static int
56945+glob_match(const char *p, const char *n)
56946+{
56947+ char c;
56948+
56949+ while ((c = *p++) != '\0') {
56950+ switch (c) {
56951+ case '?':
56952+ if (*n == '\0')
56953+ return 1;
56954+ else if (*n == '/')
56955+ return 1;
56956+ break;
56957+ case '\\':
56958+ if (*n != c)
56959+ return 1;
56960+ break;
56961+ case '*':
56962+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
56963+ if (*n == '/')
56964+ return 1;
56965+ else if (c == '?') {
56966+ if (*n == '\0')
56967+ return 1;
56968+ else
56969+ ++n;
56970+ }
56971+ }
56972+ if (c == '\0') {
56973+ return 0;
56974+ } else {
56975+ const char *endp;
56976+
56977+ if ((endp = strchr(n, '/')) == NULL)
56978+ endp = n + strlen(n);
56979+
56980+ if (c == '[') {
56981+ for (--p; n < endp; ++n)
56982+ if (!glob_match(p, n))
56983+ return 0;
56984+ } else if (c == '/') {
56985+ while (*n != '\0' && *n != '/')
56986+ ++n;
56987+ if (*n == '/' && !glob_match(p, n + 1))
56988+ return 0;
56989+ } else {
56990+ for (--p; n < endp; ++n)
56991+ if (*n == c && !glob_match(p, n))
56992+ return 0;
56993+ }
56994+
56995+ return 1;
56996+ }
56997+ case '[':
56998+ {
56999+ int not;
57000+ char cold;
57001+
57002+ if (*n == '\0' || *n == '/')
57003+ return 1;
57004+
57005+ not = (*p == '!' || *p == '^');
57006+ if (not)
57007+ ++p;
57008+
57009+ c = *p++;
57010+ for (;;) {
57011+ unsigned char fn = (unsigned char)*n;
57012+
57013+ if (c == '\0')
57014+ return 1;
57015+ else {
57016+ if (c == fn)
57017+ goto matched;
57018+ cold = c;
57019+ c = *p++;
57020+
57021+ if (c == '-' && *p != ']') {
57022+ unsigned char cend = *p++;
57023+
57024+ if (cend == '\0')
57025+ return 1;
57026+
57027+ if (cold <= fn && fn <= cend)
57028+ goto matched;
57029+
57030+ c = *p++;
57031+ }
57032+ }
57033+
57034+ if (c == ']')
57035+ break;
57036+ }
57037+ if (!not)
57038+ return 1;
57039+ break;
57040+ matched:
57041+ while (c != ']') {
57042+ if (c == '\0')
57043+ return 1;
57044+
57045+ c = *p++;
57046+ }
57047+ if (not)
57048+ return 1;
57049+ }
57050+ break;
57051+ default:
57052+ if (c != *n)
57053+ return 1;
57054+ }
57055+
57056+ ++n;
57057+ }
57058+
57059+ if (*n == '\0')
57060+ return 0;
57061+
57062+ if (*n == '/')
57063+ return 0;
57064+
57065+ return 1;
57066+}
57067+
57068+static struct acl_object_label *
57069+chk_glob_label(struct acl_object_label *globbed,
57070+ struct dentry *dentry, struct vfsmount *mnt, char **path)
57071+{
57072+ struct acl_object_label *tmp;
57073+
57074+ if (*path == NULL)
57075+ *path = gr_to_filename_nolock(dentry, mnt);
57076+
57077+ tmp = globbed;
57078+
57079+ while (tmp) {
57080+ if (!glob_match(tmp->filename, *path))
57081+ return tmp;
57082+ tmp = tmp->next;
57083+ }
57084+
57085+ return NULL;
57086+}
57087+
57088+static struct acl_object_label *
57089+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57090+ const ino_t curr_ino, const dev_t curr_dev,
57091+ const struct acl_subject_label *subj, char **path, const int checkglob)
57092+{
57093+ struct acl_subject_label *tmpsubj;
57094+ struct acl_object_label *retval;
57095+ struct acl_object_label *retval2;
57096+
57097+ tmpsubj = (struct acl_subject_label *) subj;
57098+ read_lock(&gr_inode_lock);
57099+ do {
57100+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57101+ if (retval) {
57102+ if (checkglob && retval->globbed) {
57103+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57104+ (struct vfsmount *)orig_mnt, path);
57105+ if (retval2)
57106+ retval = retval2;
57107+ }
57108+ break;
57109+ }
57110+ } while ((tmpsubj = tmpsubj->parent_subject));
57111+ read_unlock(&gr_inode_lock);
57112+
57113+ return retval;
57114+}
57115+
57116+static __inline__ struct acl_object_label *
57117+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57118+ const struct dentry *curr_dentry,
57119+ const struct acl_subject_label *subj, char **path, const int checkglob)
57120+{
57121+ int newglob = checkglob;
57122+
57123+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57124+ as we don't want a / * rule to match instead of the / object
57125+ don't do this for create lookups that call this function though, since they're looking up
57126+ on the parent and thus need globbing checks on all paths
57127+ */
57128+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57129+ newglob = GR_NO_GLOB;
57130+
57131+ return __full_lookup(orig_dentry, orig_mnt,
57132+ curr_dentry->d_inode->i_ino,
57133+ __get_dev(curr_dentry), subj, path, newglob);
57134+}
57135+
57136+static struct acl_object_label *
57137+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57138+ const struct acl_subject_label *subj, char *path, const int checkglob)
57139+{
57140+ struct dentry *dentry = (struct dentry *) l_dentry;
57141+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57142+ struct acl_object_label *retval;
57143+
57144+ spin_lock(&dcache_lock);
57145+ spin_lock(&vfsmount_lock);
57146+
57147+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57148+#ifdef CONFIG_NET
57149+ mnt == sock_mnt ||
57150+#endif
57151+#ifdef CONFIG_HUGETLBFS
57152+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57153+#endif
57154+ /* ignore Eric Biederman */
57155+ IS_PRIVATE(l_dentry->d_inode))) {
57156+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57157+ goto out;
57158+ }
57159+
57160+ for (;;) {
57161+ if (dentry == real_root && mnt == real_root_mnt)
57162+ break;
57163+
57164+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57165+ if (mnt->mnt_parent == mnt)
57166+ break;
57167+
57168+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57169+ if (retval != NULL)
57170+ goto out;
57171+
57172+ dentry = mnt->mnt_mountpoint;
57173+ mnt = mnt->mnt_parent;
57174+ continue;
57175+ }
57176+
57177+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57178+ if (retval != NULL)
57179+ goto out;
57180+
57181+ dentry = dentry->d_parent;
57182+ }
57183+
57184+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57185+
57186+ if (retval == NULL)
57187+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57188+out:
57189+ spin_unlock(&vfsmount_lock);
57190+ spin_unlock(&dcache_lock);
57191+
57192+ BUG_ON(retval == NULL);
57193+
57194+ return retval;
57195+}
57196+
57197+static __inline__ struct acl_object_label *
57198+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57199+ const struct acl_subject_label *subj)
57200+{
57201+ char *path = NULL;
57202+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57203+}
57204+
57205+static __inline__ struct acl_object_label *
57206+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57207+ const struct acl_subject_label *subj)
57208+{
57209+ char *path = NULL;
57210+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57211+}
57212+
57213+static __inline__ struct acl_object_label *
57214+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57215+ const struct acl_subject_label *subj, char *path)
57216+{
57217+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57218+}
57219+
57220+static struct acl_subject_label *
57221+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57222+ const struct acl_role_label *role)
57223+{
57224+ struct dentry *dentry = (struct dentry *) l_dentry;
57225+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57226+ struct acl_subject_label *retval;
57227+
57228+ spin_lock(&dcache_lock);
57229+ spin_lock(&vfsmount_lock);
57230+
57231+ for (;;) {
57232+ if (dentry == real_root && mnt == real_root_mnt)
57233+ break;
57234+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57235+ if (mnt->mnt_parent == mnt)
57236+ break;
57237+
57238+ read_lock(&gr_inode_lock);
57239+ retval =
57240+ lookup_acl_subj_label(dentry->d_inode->i_ino,
57241+ __get_dev(dentry), role);
57242+ read_unlock(&gr_inode_lock);
57243+ if (retval != NULL)
57244+ goto out;
57245+
57246+ dentry = mnt->mnt_mountpoint;
57247+ mnt = mnt->mnt_parent;
57248+ continue;
57249+ }
57250+
57251+ read_lock(&gr_inode_lock);
57252+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57253+ __get_dev(dentry), role);
57254+ read_unlock(&gr_inode_lock);
57255+ if (retval != NULL)
57256+ goto out;
57257+
57258+ dentry = dentry->d_parent;
57259+ }
57260+
57261+ read_lock(&gr_inode_lock);
57262+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57263+ __get_dev(dentry), role);
57264+ read_unlock(&gr_inode_lock);
57265+
57266+ if (unlikely(retval == NULL)) {
57267+ read_lock(&gr_inode_lock);
57268+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
57269+ __get_dev(real_root), role);
57270+ read_unlock(&gr_inode_lock);
57271+ }
57272+out:
57273+ spin_unlock(&vfsmount_lock);
57274+ spin_unlock(&dcache_lock);
57275+
57276+ BUG_ON(retval == NULL);
57277+
57278+ return retval;
57279+}
57280+
57281+static void
57282+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
57283+{
57284+ struct task_struct *task = current;
57285+ const struct cred *cred = current_cred();
57286+
57287+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57288+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57289+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57290+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
57291+
57292+ return;
57293+}
57294+
57295+static void
57296+gr_log_learn_sysctl(const char *path, const __u32 mode)
57297+{
57298+ struct task_struct *task = current;
57299+ const struct cred *cred = current_cred();
57300+
57301+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57302+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57303+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57304+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
57305+
57306+ return;
57307+}
57308+
57309+static void
57310+gr_log_learn_id_change(const char type, const unsigned int real,
57311+ const unsigned int effective, const unsigned int fs)
57312+{
57313+ struct task_struct *task = current;
57314+ const struct cred *cred = current_cred();
57315+
57316+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
57317+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57318+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57319+ type, real, effective, fs, &task->signal->saved_ip);
57320+
57321+ return;
57322+}
57323+
57324+__u32
57325+gr_search_file(const struct dentry * dentry, const __u32 mode,
57326+ const struct vfsmount * mnt)
57327+{
57328+ __u32 retval = mode;
57329+ struct acl_subject_label *curracl;
57330+ struct acl_object_label *currobj;
57331+
57332+ if (unlikely(!(gr_status & GR_READY)))
57333+ return (mode & ~GR_AUDITS);
57334+
57335+ curracl = current->acl;
57336+
57337+ currobj = chk_obj_label(dentry, mnt, curracl);
57338+ retval = currobj->mode & mode;
57339+
57340+ /* if we're opening a specified transfer file for writing
57341+ (e.g. /dev/initctl), then transfer our role to init
57342+ */
57343+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
57344+ current->role->roletype & GR_ROLE_PERSIST)) {
57345+ struct task_struct *task = init_pid_ns.child_reaper;
57346+
57347+ if (task->role != current->role) {
57348+ task->acl_sp_role = 0;
57349+ task->acl_role_id = current->acl_role_id;
57350+ task->role = current->role;
57351+ rcu_read_lock();
57352+ read_lock(&grsec_exec_file_lock);
57353+ gr_apply_subject_to_task(task);
57354+ read_unlock(&grsec_exec_file_lock);
57355+ rcu_read_unlock();
57356+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
57357+ }
57358+ }
57359+
57360+ if (unlikely
57361+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
57362+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
57363+ __u32 new_mode = mode;
57364+
57365+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57366+
57367+ retval = new_mode;
57368+
57369+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
57370+ new_mode |= GR_INHERIT;
57371+
57372+ if (!(mode & GR_NOLEARN))
57373+ gr_log_learn(dentry, mnt, new_mode);
57374+ }
57375+
57376+ return retval;
57377+}
57378+
57379+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
57380+ const struct dentry *parent,
57381+ const struct vfsmount *mnt)
57382+{
57383+ struct name_entry *match;
57384+ struct acl_object_label *matchpo;
57385+ struct acl_subject_label *curracl;
57386+ char *path;
57387+
57388+ if (unlikely(!(gr_status & GR_READY)))
57389+ return NULL;
57390+
57391+ preempt_disable();
57392+ path = gr_to_filename_rbac(new_dentry, mnt);
57393+ match = lookup_name_entry_create(path);
57394+
57395+ curracl = current->acl;
57396+
57397+ if (match) {
57398+ read_lock(&gr_inode_lock);
57399+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
57400+ read_unlock(&gr_inode_lock);
57401+
57402+ if (matchpo) {
57403+ preempt_enable();
57404+ return matchpo;
57405+ }
57406+ }
57407+
57408+ // lookup parent
57409+
57410+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
57411+
57412+ preempt_enable();
57413+ return matchpo;
57414+}
57415+
57416+__u32
57417+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
57418+ const struct vfsmount * mnt, const __u32 mode)
57419+{
57420+ struct acl_object_label *matchpo;
57421+ __u32 retval;
57422+
57423+ if (unlikely(!(gr_status & GR_READY)))
57424+ return (mode & ~GR_AUDITS);
57425+
57426+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
57427+
57428+ retval = matchpo->mode & mode;
57429+
57430+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
57431+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
57432+ __u32 new_mode = mode;
57433+
57434+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57435+
57436+ gr_log_learn(new_dentry, mnt, new_mode);
57437+ return new_mode;
57438+ }
57439+
57440+ return retval;
57441+}
57442+
57443+__u32
57444+gr_check_link(const struct dentry * new_dentry,
57445+ const struct dentry * parent_dentry,
57446+ const struct vfsmount * parent_mnt,
57447+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
57448+{
57449+ struct acl_object_label *obj;
57450+ __u32 oldmode, newmode;
57451+ __u32 needmode;
57452+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
57453+ GR_DELETE | GR_INHERIT;
57454+
57455+ if (unlikely(!(gr_status & GR_READY)))
57456+ return (GR_CREATE | GR_LINK);
57457+
57458+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
57459+ oldmode = obj->mode;
57460+
57461+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
57462+ newmode = obj->mode;
57463+
57464+ needmode = newmode & checkmodes;
57465+
57466+ // old name for hardlink must have at least the permissions of the new name
57467+ if ((oldmode & needmode) != needmode)
57468+ goto bad;
57469+
57470+ // if old name had restrictions/auditing, make sure the new name does as well
57471+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
57472+
57473+ // don't allow hardlinking of suid/sgid files without permission
57474+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57475+ needmode |= GR_SETID;
57476+
57477+ if ((newmode & needmode) != needmode)
57478+ goto bad;
57479+
57480+ // enforce minimum permissions
57481+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
57482+ return newmode;
57483+bad:
57484+ needmode = oldmode;
57485+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57486+ needmode |= GR_SETID;
57487+
57488+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
57489+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
57490+ return (GR_CREATE | GR_LINK);
57491+ } else if (newmode & GR_SUPPRESS)
57492+ return GR_SUPPRESS;
57493+ else
57494+ return 0;
57495+}
57496+
57497+int
57498+gr_check_hidden_task(const struct task_struct *task)
57499+{
57500+ if (unlikely(!(gr_status & GR_READY)))
57501+ return 0;
57502+
57503+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
57504+ return 1;
57505+
57506+ return 0;
57507+}
57508+
57509+int
57510+gr_check_protected_task(const struct task_struct *task)
57511+{
57512+ if (unlikely(!(gr_status & GR_READY) || !task))
57513+ return 0;
57514+
57515+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57516+ task->acl != current->acl)
57517+ return 1;
57518+
57519+ return 0;
57520+}
57521+
57522+int
57523+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57524+{
57525+ struct task_struct *p;
57526+ int ret = 0;
57527+
57528+ if (unlikely(!(gr_status & GR_READY) || !pid))
57529+ return ret;
57530+
57531+ read_lock(&tasklist_lock);
57532+ do_each_pid_task(pid, type, p) {
57533+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57534+ p->acl != current->acl) {
57535+ ret = 1;
57536+ goto out;
57537+ }
57538+ } while_each_pid_task(pid, type, p);
57539+out:
57540+ read_unlock(&tasklist_lock);
57541+
57542+ return ret;
57543+}
57544+
57545+void
57546+gr_copy_label(struct task_struct *tsk)
57547+{
57548+ tsk->signal->used_accept = 0;
57549+ tsk->acl_sp_role = 0;
57550+ tsk->acl_role_id = current->acl_role_id;
57551+ tsk->acl = current->acl;
57552+ tsk->role = current->role;
57553+ tsk->signal->curr_ip = current->signal->curr_ip;
57554+ tsk->signal->saved_ip = current->signal->saved_ip;
57555+ if (current->exec_file)
57556+ get_file(current->exec_file);
57557+ tsk->exec_file = current->exec_file;
57558+ tsk->is_writable = current->is_writable;
57559+ if (unlikely(current->signal->used_accept)) {
57560+ current->signal->curr_ip = 0;
57561+ current->signal->saved_ip = 0;
57562+ }
57563+
57564+ return;
57565+}
57566+
57567+static void
57568+gr_set_proc_res(struct task_struct *task)
57569+{
57570+ struct acl_subject_label *proc;
57571+ unsigned short i;
57572+
57573+ proc = task->acl;
57574+
57575+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
57576+ return;
57577+
57578+ for (i = 0; i < RLIM_NLIMITS; i++) {
57579+ if (!(proc->resmask & (1 << i)))
57580+ continue;
57581+
57582+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
57583+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
57584+ }
57585+
57586+ return;
57587+}
57588+
57589+extern int __gr_process_user_ban(struct user_struct *user);
57590+
57591+int
57592+gr_check_user_change(int real, int effective, int fs)
57593+{
57594+ unsigned int i;
57595+ __u16 num;
57596+ uid_t *uidlist;
57597+ int curuid;
57598+ int realok = 0;
57599+ int effectiveok = 0;
57600+ int fsok = 0;
57601+
57602+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57603+ struct user_struct *user;
57604+
57605+ if (real == -1)
57606+ goto skipit;
57607+
57608+ user = find_user(real);
57609+ if (user == NULL)
57610+ goto skipit;
57611+
57612+ if (__gr_process_user_ban(user)) {
57613+ /* for find_user */
57614+ free_uid(user);
57615+ return 1;
57616+ }
57617+
57618+ /* for find_user */
57619+ free_uid(user);
57620+
57621+skipit:
57622+#endif
57623+
57624+ if (unlikely(!(gr_status & GR_READY)))
57625+ return 0;
57626+
57627+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57628+ gr_log_learn_id_change('u', real, effective, fs);
57629+
57630+ num = current->acl->user_trans_num;
57631+ uidlist = current->acl->user_transitions;
57632+
57633+ if (uidlist == NULL)
57634+ return 0;
57635+
57636+ if (real == -1)
57637+ realok = 1;
57638+ if (effective == -1)
57639+ effectiveok = 1;
57640+ if (fs == -1)
57641+ fsok = 1;
57642+
57643+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
57644+ for (i = 0; i < num; i++) {
57645+ curuid = (int)uidlist[i];
57646+ if (real == curuid)
57647+ realok = 1;
57648+ if (effective == curuid)
57649+ effectiveok = 1;
57650+ if (fs == curuid)
57651+ fsok = 1;
57652+ }
57653+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
57654+ for (i = 0; i < num; i++) {
57655+ curuid = (int)uidlist[i];
57656+ if (real == curuid)
57657+ break;
57658+ if (effective == curuid)
57659+ break;
57660+ if (fs == curuid)
57661+ break;
57662+ }
57663+ /* not in deny list */
57664+ if (i == num) {
57665+ realok = 1;
57666+ effectiveok = 1;
57667+ fsok = 1;
57668+ }
57669+ }
57670+
57671+ if (realok && effectiveok && fsok)
57672+ return 0;
57673+ else {
57674+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
57675+ return 1;
57676+ }
57677+}
57678+
57679+int
57680+gr_check_group_change(int real, int effective, int fs)
57681+{
57682+ unsigned int i;
57683+ __u16 num;
57684+ gid_t *gidlist;
57685+ int curgid;
57686+ int realok = 0;
57687+ int effectiveok = 0;
57688+ int fsok = 0;
57689+
57690+ if (unlikely(!(gr_status & GR_READY)))
57691+ return 0;
57692+
57693+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57694+ gr_log_learn_id_change('g', real, effective, fs);
57695+
57696+ num = current->acl->group_trans_num;
57697+ gidlist = current->acl->group_transitions;
57698+
57699+ if (gidlist == NULL)
57700+ return 0;
57701+
57702+ if (real == -1)
57703+ realok = 1;
57704+ if (effective == -1)
57705+ effectiveok = 1;
57706+ if (fs == -1)
57707+ fsok = 1;
57708+
57709+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
57710+ for (i = 0; i < num; i++) {
57711+ curgid = (int)gidlist[i];
57712+ if (real == curgid)
57713+ realok = 1;
57714+ if (effective == curgid)
57715+ effectiveok = 1;
57716+ if (fs == curgid)
57717+ fsok = 1;
57718+ }
57719+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
57720+ for (i = 0; i < num; i++) {
57721+ curgid = (int)gidlist[i];
57722+ if (real == curgid)
57723+ break;
57724+ if (effective == curgid)
57725+ break;
57726+ if (fs == curgid)
57727+ break;
57728+ }
57729+ /* not in deny list */
57730+ if (i == num) {
57731+ realok = 1;
57732+ effectiveok = 1;
57733+ fsok = 1;
57734+ }
57735+ }
57736+
57737+ if (realok && effectiveok && fsok)
57738+ return 0;
57739+ else {
57740+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
57741+ return 1;
57742+ }
57743+}
57744+
57745+void
57746+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
57747+{
57748+ struct acl_role_label *role = task->role;
57749+ struct acl_subject_label *subj = NULL;
57750+ struct acl_object_label *obj;
57751+ struct file *filp;
57752+
57753+ if (unlikely(!(gr_status & GR_READY)))
57754+ return;
57755+
57756+ filp = task->exec_file;
57757+
57758+ /* kernel process, we'll give them the kernel role */
57759+ if (unlikely(!filp)) {
57760+ task->role = kernel_role;
57761+ task->acl = kernel_role->root_label;
57762+ return;
57763+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
57764+ role = lookup_acl_role_label(task, uid, gid);
57765+
57766+ /* perform subject lookup in possibly new role
57767+ we can use this result below in the case where role == task->role
57768+ */
57769+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
57770+
57771+ /* if we changed uid/gid, but result in the same role
57772+ and are using inheritance, don't lose the inherited subject
57773+ if current subject is other than what normal lookup
57774+ would result in, we arrived via inheritance, don't
57775+ lose subject
57776+ */
57777+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
57778+ (subj == task->acl)))
57779+ task->acl = subj;
57780+
57781+ task->role = role;
57782+
57783+ task->is_writable = 0;
57784+
57785+ /* ignore additional mmap checks for processes that are writable
57786+ by the default ACL */
57787+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
57788+ if (unlikely(obj->mode & GR_WRITE))
57789+ task->is_writable = 1;
57790+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
57791+ if (unlikely(obj->mode & GR_WRITE))
57792+ task->is_writable = 1;
57793+
57794+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57795+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57796+#endif
57797+
57798+ gr_set_proc_res(task);
57799+
57800+ return;
57801+}
57802+
57803+int
57804+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57805+ const int unsafe_share)
57806+{
57807+ struct task_struct *task = current;
57808+ struct acl_subject_label *newacl;
57809+ struct acl_object_label *obj;
57810+ __u32 retmode;
57811+
57812+ if (unlikely(!(gr_status & GR_READY)))
57813+ return 0;
57814+
57815+ newacl = chk_subj_label(dentry, mnt, task->role);
57816+
57817+ task_lock(task);
57818+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
57819+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
57820+ !(task->role->roletype & GR_ROLE_GOD) &&
57821+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
57822+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
57823+ task_unlock(task);
57824+ if (unsafe_share)
57825+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
57826+ else
57827+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
57828+ return -EACCES;
57829+ }
57830+ task_unlock(task);
57831+
57832+ obj = chk_obj_label(dentry, mnt, task->acl);
57833+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
57834+
57835+ if (!(task->acl->mode & GR_INHERITLEARN) &&
57836+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
57837+ if (obj->nested)
57838+ task->acl = obj->nested;
57839+ else
57840+ task->acl = newacl;
57841+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
57842+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
57843+
57844+ task->is_writable = 0;
57845+
57846+ /* ignore additional mmap checks for processes that are writable
57847+ by the default ACL */
57848+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
57849+ if (unlikely(obj->mode & GR_WRITE))
57850+ task->is_writable = 1;
57851+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
57852+ if (unlikely(obj->mode & GR_WRITE))
57853+ task->is_writable = 1;
57854+
57855+ gr_set_proc_res(task);
57856+
57857+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57858+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57859+#endif
57860+ return 0;
57861+}
57862+
57863+/* always called with valid inodev ptr */
57864+static void
57865+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
57866+{
57867+ struct acl_object_label *matchpo;
57868+ struct acl_subject_label *matchps;
57869+ struct acl_subject_label *subj;
57870+ struct acl_role_label *role;
57871+ unsigned int x;
57872+
57873+ FOR_EACH_ROLE_START(role)
57874+ FOR_EACH_SUBJECT_START(role, subj, x)
57875+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
57876+ matchpo->mode |= GR_DELETED;
57877+ FOR_EACH_SUBJECT_END(subj,x)
57878+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
57879+ if (subj->inode == ino && subj->device == dev)
57880+ subj->mode |= GR_DELETED;
57881+ FOR_EACH_NESTED_SUBJECT_END(subj)
57882+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
57883+ matchps->mode |= GR_DELETED;
57884+ FOR_EACH_ROLE_END(role)
57885+
57886+ inodev->nentry->deleted = 1;
57887+
57888+ return;
57889+}
57890+
57891+void
57892+gr_handle_delete(const ino_t ino, const dev_t dev)
57893+{
57894+ struct inodev_entry *inodev;
57895+
57896+ if (unlikely(!(gr_status & GR_READY)))
57897+ return;
57898+
57899+ write_lock(&gr_inode_lock);
57900+ inodev = lookup_inodev_entry(ino, dev);
57901+ if (inodev != NULL)
57902+ do_handle_delete(inodev, ino, dev);
57903+ write_unlock(&gr_inode_lock);
57904+
57905+ return;
57906+}
57907+
57908+static void
57909+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
57910+ const ino_t newinode, const dev_t newdevice,
57911+ struct acl_subject_label *subj)
57912+{
57913+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
57914+ struct acl_object_label *match;
57915+
57916+ match = subj->obj_hash[index];
57917+
57918+ while (match && (match->inode != oldinode ||
57919+ match->device != olddevice ||
57920+ !(match->mode & GR_DELETED)))
57921+ match = match->next;
57922+
57923+ if (match && (match->inode == oldinode)
57924+ && (match->device == olddevice)
57925+ && (match->mode & GR_DELETED)) {
57926+ if (match->prev == NULL) {
57927+ subj->obj_hash[index] = match->next;
57928+ if (match->next != NULL)
57929+ match->next->prev = NULL;
57930+ } else {
57931+ match->prev->next = match->next;
57932+ if (match->next != NULL)
57933+ match->next->prev = match->prev;
57934+ }
57935+ match->prev = NULL;
57936+ match->next = NULL;
57937+ match->inode = newinode;
57938+ match->device = newdevice;
57939+ match->mode &= ~GR_DELETED;
57940+
57941+ insert_acl_obj_label(match, subj);
57942+ }
57943+
57944+ return;
57945+}
57946+
57947+static void
57948+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
57949+ const ino_t newinode, const dev_t newdevice,
57950+ struct acl_role_label *role)
57951+{
57952+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
57953+ struct acl_subject_label *match;
57954+
57955+ match = role->subj_hash[index];
57956+
57957+ while (match && (match->inode != oldinode ||
57958+ match->device != olddevice ||
57959+ !(match->mode & GR_DELETED)))
57960+ match = match->next;
57961+
57962+ if (match && (match->inode == oldinode)
57963+ && (match->device == olddevice)
57964+ && (match->mode & GR_DELETED)) {
57965+ if (match->prev == NULL) {
57966+ role->subj_hash[index] = match->next;
57967+ if (match->next != NULL)
57968+ match->next->prev = NULL;
57969+ } else {
57970+ match->prev->next = match->next;
57971+ if (match->next != NULL)
57972+ match->next->prev = match->prev;
57973+ }
57974+ match->prev = NULL;
57975+ match->next = NULL;
57976+ match->inode = newinode;
57977+ match->device = newdevice;
57978+ match->mode &= ~GR_DELETED;
57979+
57980+ insert_acl_subj_label(match, role);
57981+ }
57982+
57983+ return;
57984+}
57985+
57986+static void
57987+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
57988+ const ino_t newinode, const dev_t newdevice)
57989+{
57990+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
57991+ struct inodev_entry *match;
57992+
57993+ match = inodev_set.i_hash[index];
57994+
57995+ while (match && (match->nentry->inode != oldinode ||
57996+ match->nentry->device != olddevice || !match->nentry->deleted))
57997+ match = match->next;
57998+
57999+ if (match && (match->nentry->inode == oldinode)
58000+ && (match->nentry->device == olddevice) &&
58001+ match->nentry->deleted) {
58002+ if (match->prev == NULL) {
58003+ inodev_set.i_hash[index] = match->next;
58004+ if (match->next != NULL)
58005+ match->next->prev = NULL;
58006+ } else {
58007+ match->prev->next = match->next;
58008+ if (match->next != NULL)
58009+ match->next->prev = match->prev;
58010+ }
58011+ match->prev = NULL;
58012+ match->next = NULL;
58013+ match->nentry->inode = newinode;
58014+ match->nentry->device = newdevice;
58015+ match->nentry->deleted = 0;
58016+
58017+ insert_inodev_entry(match);
58018+ }
58019+
58020+ return;
58021+}
58022+
58023+static void
58024+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58025+{
58026+ struct acl_subject_label *subj;
58027+ struct acl_role_label *role;
58028+ unsigned int x;
58029+
58030+ FOR_EACH_ROLE_START(role)
58031+ update_acl_subj_label(matchn->inode, matchn->device,
58032+ inode, dev, role);
58033+
58034+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58035+ if ((subj->inode == inode) && (subj->device == dev)) {
58036+ subj->inode = inode;
58037+ subj->device = dev;
58038+ }
58039+ FOR_EACH_NESTED_SUBJECT_END(subj)
58040+ FOR_EACH_SUBJECT_START(role, subj, x)
58041+ update_acl_obj_label(matchn->inode, matchn->device,
58042+ inode, dev, subj);
58043+ FOR_EACH_SUBJECT_END(subj,x)
58044+ FOR_EACH_ROLE_END(role)
58045+
58046+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58047+
58048+ return;
58049+}
58050+
58051+static void
58052+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58053+ const struct vfsmount *mnt)
58054+{
58055+ ino_t ino = dentry->d_inode->i_ino;
58056+ dev_t dev = __get_dev(dentry);
58057+
58058+ __do_handle_create(matchn, ino, dev);
58059+
58060+ return;
58061+}
58062+
58063+void
58064+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58065+{
58066+ struct name_entry *matchn;
58067+
58068+ if (unlikely(!(gr_status & GR_READY)))
58069+ return;
58070+
58071+ preempt_disable();
58072+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58073+
58074+ if (unlikely((unsigned long)matchn)) {
58075+ write_lock(&gr_inode_lock);
58076+ do_handle_create(matchn, dentry, mnt);
58077+ write_unlock(&gr_inode_lock);
58078+ }
58079+ preempt_enable();
58080+
58081+ return;
58082+}
58083+
58084+void
58085+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58086+{
58087+ struct name_entry *matchn;
58088+
58089+ if (unlikely(!(gr_status & GR_READY)))
58090+ return;
58091+
58092+ preempt_disable();
58093+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58094+
58095+ if (unlikely((unsigned long)matchn)) {
58096+ write_lock(&gr_inode_lock);
58097+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58098+ write_unlock(&gr_inode_lock);
58099+ }
58100+ preempt_enable();
58101+
58102+ return;
58103+}
58104+
58105+void
58106+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58107+ struct dentry *old_dentry,
58108+ struct dentry *new_dentry,
58109+ struct vfsmount *mnt, const __u8 replace)
58110+{
58111+ struct name_entry *matchn;
58112+ struct inodev_entry *inodev;
58113+ struct inode *inode = new_dentry->d_inode;
58114+ ino_t oldinode = old_dentry->d_inode->i_ino;
58115+ dev_t olddev = __get_dev(old_dentry);
58116+
58117+ /* vfs_rename swaps the name and parent link for old_dentry and
58118+ new_dentry
58119+ at this point, old_dentry has the new name, parent link, and inode
58120+ for the renamed file
58121+ if a file is being replaced by a rename, new_dentry has the inode
58122+ and name for the replaced file
58123+ */
58124+
58125+ if (unlikely(!(gr_status & GR_READY)))
58126+ return;
58127+
58128+ preempt_disable();
58129+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58130+
58131+ /* we wouldn't have to check d_inode if it weren't for
58132+ NFS silly-renaming
58133+ */
58134+
58135+ write_lock(&gr_inode_lock);
58136+ if (unlikely(replace && inode)) {
58137+ ino_t newinode = inode->i_ino;
58138+ dev_t newdev = __get_dev(new_dentry);
58139+ inodev = lookup_inodev_entry(newinode, newdev);
58140+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58141+ do_handle_delete(inodev, newinode, newdev);
58142+ }
58143+
58144+ inodev = lookup_inodev_entry(oldinode, olddev);
58145+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58146+ do_handle_delete(inodev, oldinode, olddev);
58147+
58148+ if (unlikely((unsigned long)matchn))
58149+ do_handle_create(matchn, old_dentry, mnt);
58150+
58151+ write_unlock(&gr_inode_lock);
58152+ preempt_enable();
58153+
58154+ return;
58155+}
58156+
58157+static int
58158+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58159+ unsigned char **sum)
58160+{
58161+ struct acl_role_label *r;
58162+ struct role_allowed_ip *ipp;
58163+ struct role_transition *trans;
58164+ unsigned int i;
58165+ int found = 0;
58166+ u32 curr_ip = current->signal->curr_ip;
58167+
58168+ current->signal->saved_ip = curr_ip;
58169+
58170+ /* check transition table */
58171+
58172+ for (trans = current->role->transitions; trans; trans = trans->next) {
58173+ if (!strcmp(rolename, trans->rolename)) {
58174+ found = 1;
58175+ break;
58176+ }
58177+ }
58178+
58179+ if (!found)
58180+ return 0;
58181+
58182+ /* handle special roles that do not require authentication
58183+ and check ip */
58184+
58185+ FOR_EACH_ROLE_START(r)
58186+ if (!strcmp(rolename, r->rolename) &&
58187+ (r->roletype & GR_ROLE_SPECIAL)) {
58188+ found = 0;
58189+ if (r->allowed_ips != NULL) {
58190+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58191+ if ((ntohl(curr_ip) & ipp->netmask) ==
58192+ (ntohl(ipp->addr) & ipp->netmask))
58193+ found = 1;
58194+ }
58195+ } else
58196+ found = 2;
58197+ if (!found)
58198+ return 0;
58199+
58200+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58201+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58202+ *salt = NULL;
58203+ *sum = NULL;
58204+ return 1;
58205+ }
58206+ }
58207+ FOR_EACH_ROLE_END(r)
58208+
58209+ for (i = 0; i < num_sprole_pws; i++) {
58210+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58211+ *salt = acl_special_roles[i]->salt;
58212+ *sum = acl_special_roles[i]->sum;
58213+ return 1;
58214+ }
58215+ }
58216+
58217+ return 0;
58218+}
58219+
58220+static void
58221+assign_special_role(char *rolename)
58222+{
58223+ struct acl_object_label *obj;
58224+ struct acl_role_label *r;
58225+ struct acl_role_label *assigned = NULL;
58226+ struct task_struct *tsk;
58227+ struct file *filp;
58228+
58229+ FOR_EACH_ROLE_START(r)
58230+ if (!strcmp(rolename, r->rolename) &&
58231+ (r->roletype & GR_ROLE_SPECIAL)) {
58232+ assigned = r;
58233+ break;
58234+ }
58235+ FOR_EACH_ROLE_END(r)
58236+
58237+ if (!assigned)
58238+ return;
58239+
58240+ read_lock(&tasklist_lock);
58241+ read_lock(&grsec_exec_file_lock);
58242+
58243+ tsk = current->real_parent;
58244+ if (tsk == NULL)
58245+ goto out_unlock;
58246+
58247+ filp = tsk->exec_file;
58248+ if (filp == NULL)
58249+ goto out_unlock;
58250+
58251+ tsk->is_writable = 0;
58252+
58253+ tsk->acl_sp_role = 1;
58254+ tsk->acl_role_id = ++acl_sp_role_value;
58255+ tsk->role = assigned;
58256+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
58257+
58258+ /* ignore additional mmap checks for processes that are writable
58259+ by the default ACL */
58260+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58261+ if (unlikely(obj->mode & GR_WRITE))
58262+ tsk->is_writable = 1;
58263+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
58264+ if (unlikely(obj->mode & GR_WRITE))
58265+ tsk->is_writable = 1;
58266+
58267+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58268+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
58269+#endif
58270+
58271+out_unlock:
58272+ read_unlock(&grsec_exec_file_lock);
58273+ read_unlock(&tasklist_lock);
58274+ return;
58275+}
58276+
58277+int gr_check_secure_terminal(struct task_struct *task)
58278+{
58279+ struct task_struct *p, *p2, *p3;
58280+ struct files_struct *files;
58281+ struct fdtable *fdt;
58282+ struct file *our_file = NULL, *file;
58283+ int i;
58284+
58285+ if (task->signal->tty == NULL)
58286+ return 1;
58287+
58288+ files = get_files_struct(task);
58289+ if (files != NULL) {
58290+ rcu_read_lock();
58291+ fdt = files_fdtable(files);
58292+ for (i=0; i < fdt->max_fds; i++) {
58293+ file = fcheck_files(files, i);
58294+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
58295+ get_file(file);
58296+ our_file = file;
58297+ }
58298+ }
58299+ rcu_read_unlock();
58300+ put_files_struct(files);
58301+ }
58302+
58303+ if (our_file == NULL)
58304+ return 1;
58305+
58306+ read_lock(&tasklist_lock);
58307+ do_each_thread(p2, p) {
58308+ files = get_files_struct(p);
58309+ if (files == NULL ||
58310+ (p->signal && p->signal->tty == task->signal->tty)) {
58311+ if (files != NULL)
58312+ put_files_struct(files);
58313+ continue;
58314+ }
58315+ rcu_read_lock();
58316+ fdt = files_fdtable(files);
58317+ for (i=0; i < fdt->max_fds; i++) {
58318+ file = fcheck_files(files, i);
58319+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
58320+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
58321+ p3 = task;
58322+ while (p3->pid > 0) {
58323+ if (p3 == p)
58324+ break;
58325+ p3 = p3->real_parent;
58326+ }
58327+ if (p3 == p)
58328+ break;
58329+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
58330+ gr_handle_alertkill(p);
58331+ rcu_read_unlock();
58332+ put_files_struct(files);
58333+ read_unlock(&tasklist_lock);
58334+ fput(our_file);
58335+ return 0;
58336+ }
58337+ }
58338+ rcu_read_unlock();
58339+ put_files_struct(files);
58340+ } while_each_thread(p2, p);
58341+ read_unlock(&tasklist_lock);
58342+
58343+ fput(our_file);
58344+ return 1;
58345+}
58346+
58347+ssize_t
58348+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
58349+{
58350+ struct gr_arg_wrapper uwrap;
58351+ unsigned char *sprole_salt = NULL;
58352+ unsigned char *sprole_sum = NULL;
58353+ int error = sizeof (struct gr_arg_wrapper);
58354+ int error2 = 0;
58355+
58356+ mutex_lock(&gr_dev_mutex);
58357+
58358+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
58359+ error = -EPERM;
58360+ goto out;
58361+ }
58362+
58363+ if (count != sizeof (struct gr_arg_wrapper)) {
58364+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
58365+ error = -EINVAL;
58366+ goto out;
58367+ }
58368+
58369+
58370+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
58371+ gr_auth_expires = 0;
58372+ gr_auth_attempts = 0;
58373+ }
58374+
58375+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
58376+ error = -EFAULT;
58377+ goto out;
58378+ }
58379+
58380+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
58381+ error = -EINVAL;
58382+ goto out;
58383+ }
58384+
58385+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
58386+ error = -EFAULT;
58387+ goto out;
58388+ }
58389+
58390+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58391+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58392+ time_after(gr_auth_expires, get_seconds())) {
58393+ error = -EBUSY;
58394+ goto out;
58395+ }
58396+
58397+ /* if non-root trying to do anything other than use a special role,
58398+ do not attempt authentication, do not count towards authentication
58399+ locking
58400+ */
58401+
58402+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
58403+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58404+ current_uid()) {
58405+ error = -EPERM;
58406+ goto out;
58407+ }
58408+
58409+ /* ensure pw and special role name are null terminated */
58410+
58411+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
58412+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
58413+
58414+ /* Okay.
58415+ * We have our enough of the argument structure..(we have yet
58416+ * to copy_from_user the tables themselves) . Copy the tables
58417+ * only if we need them, i.e. for loading operations. */
58418+
58419+ switch (gr_usermode->mode) {
58420+ case GR_STATUS:
58421+ if (gr_status & GR_READY) {
58422+ error = 1;
58423+ if (!gr_check_secure_terminal(current))
58424+ error = 3;
58425+ } else
58426+ error = 2;
58427+ goto out;
58428+ case GR_SHUTDOWN:
58429+ if ((gr_status & GR_READY)
58430+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58431+ pax_open_kernel();
58432+ gr_status &= ~GR_READY;
58433+ pax_close_kernel();
58434+
58435+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
58436+ free_variables();
58437+ memset(gr_usermode, 0, sizeof (struct gr_arg));
58438+ memset(gr_system_salt, 0, GR_SALT_LEN);
58439+ memset(gr_system_sum, 0, GR_SHA_LEN);
58440+ } else if (gr_status & GR_READY) {
58441+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
58442+ error = -EPERM;
58443+ } else {
58444+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
58445+ error = -EAGAIN;
58446+ }
58447+ break;
58448+ case GR_ENABLE:
58449+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
58450+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
58451+ else {
58452+ if (gr_status & GR_READY)
58453+ error = -EAGAIN;
58454+ else
58455+ error = error2;
58456+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
58457+ }
58458+ break;
58459+ case GR_RELOAD:
58460+ if (!(gr_status & GR_READY)) {
58461+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
58462+ error = -EAGAIN;
58463+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58464+ lock_kernel();
58465+
58466+ pax_open_kernel();
58467+ gr_status &= ~GR_READY;
58468+ pax_close_kernel();
58469+
58470+ free_variables();
58471+ if (!(error2 = gracl_init(gr_usermode))) {
58472+ unlock_kernel();
58473+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
58474+ } else {
58475+ unlock_kernel();
58476+ error = error2;
58477+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58478+ }
58479+ } else {
58480+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58481+ error = -EPERM;
58482+ }
58483+ break;
58484+ case GR_SEGVMOD:
58485+ if (unlikely(!(gr_status & GR_READY))) {
58486+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
58487+ error = -EAGAIN;
58488+ break;
58489+ }
58490+
58491+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58492+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
58493+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
58494+ struct acl_subject_label *segvacl;
58495+ segvacl =
58496+ lookup_acl_subj_label(gr_usermode->segv_inode,
58497+ gr_usermode->segv_device,
58498+ current->role);
58499+ if (segvacl) {
58500+ segvacl->crashes = 0;
58501+ segvacl->expires = 0;
58502+ }
58503+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
58504+ gr_remove_uid(gr_usermode->segv_uid);
58505+ }
58506+ } else {
58507+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
58508+ error = -EPERM;
58509+ }
58510+ break;
58511+ case GR_SPROLE:
58512+ case GR_SPROLEPAM:
58513+ if (unlikely(!(gr_status & GR_READY))) {
58514+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
58515+ error = -EAGAIN;
58516+ break;
58517+ }
58518+
58519+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
58520+ current->role->expires = 0;
58521+ current->role->auth_attempts = 0;
58522+ }
58523+
58524+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58525+ time_after(current->role->expires, get_seconds())) {
58526+ error = -EBUSY;
58527+ goto out;
58528+ }
58529+
58530+ if (lookup_special_role_auth
58531+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
58532+ && ((!sprole_salt && !sprole_sum)
58533+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
58534+ char *p = "";
58535+ assign_special_role(gr_usermode->sp_role);
58536+ read_lock(&tasklist_lock);
58537+ if (current->real_parent)
58538+ p = current->real_parent->role->rolename;
58539+ read_unlock(&tasklist_lock);
58540+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
58541+ p, acl_sp_role_value);
58542+ } else {
58543+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
58544+ error = -EPERM;
58545+ if(!(current->role->auth_attempts++))
58546+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58547+
58548+ goto out;
58549+ }
58550+ break;
58551+ case GR_UNSPROLE:
58552+ if (unlikely(!(gr_status & GR_READY))) {
58553+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
58554+ error = -EAGAIN;
58555+ break;
58556+ }
58557+
58558+ if (current->role->roletype & GR_ROLE_SPECIAL) {
58559+ char *p = "";
58560+ int i = 0;
58561+
58562+ read_lock(&tasklist_lock);
58563+ if (current->real_parent) {
58564+ p = current->real_parent->role->rolename;
58565+ i = current->real_parent->acl_role_id;
58566+ }
58567+ read_unlock(&tasklist_lock);
58568+
58569+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
58570+ gr_set_acls(1);
58571+ } else {
58572+ error = -EPERM;
58573+ goto out;
58574+ }
58575+ break;
58576+ default:
58577+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
58578+ error = -EINVAL;
58579+ break;
58580+ }
58581+
58582+ if (error != -EPERM)
58583+ goto out;
58584+
58585+ if(!(gr_auth_attempts++))
58586+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58587+
58588+ out:
58589+ mutex_unlock(&gr_dev_mutex);
58590+ return error;
58591+}
58592+
58593+/* must be called with
58594+ rcu_read_lock();
58595+ read_lock(&tasklist_lock);
58596+ read_lock(&grsec_exec_file_lock);
58597+*/
58598+int gr_apply_subject_to_task(struct task_struct *task)
58599+{
58600+ struct acl_object_label *obj;
58601+ char *tmpname;
58602+ struct acl_subject_label *tmpsubj;
58603+ struct file *filp;
58604+ struct name_entry *nmatch;
58605+
58606+ filp = task->exec_file;
58607+ if (filp == NULL)
58608+ return 0;
58609+
58610+ /* the following is to apply the correct subject
58611+ on binaries running when the RBAC system
58612+ is enabled, when the binaries have been
58613+ replaced or deleted since their execution
58614+ -----
58615+ when the RBAC system starts, the inode/dev
58616+ from exec_file will be one the RBAC system
58617+ is unaware of. It only knows the inode/dev
58618+ of the present file on disk, or the absence
58619+ of it.
58620+ */
58621+ preempt_disable();
58622+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
58623+
58624+ nmatch = lookup_name_entry(tmpname);
58625+ preempt_enable();
58626+ tmpsubj = NULL;
58627+ if (nmatch) {
58628+ if (nmatch->deleted)
58629+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
58630+ else
58631+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
58632+ if (tmpsubj != NULL)
58633+ task->acl = tmpsubj;
58634+ }
58635+ if (tmpsubj == NULL)
58636+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
58637+ task->role);
58638+ if (task->acl) {
58639+ task->is_writable = 0;
58640+ /* ignore additional mmap checks for processes that are writable
58641+ by the default ACL */
58642+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58643+ if (unlikely(obj->mode & GR_WRITE))
58644+ task->is_writable = 1;
58645+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58646+ if (unlikely(obj->mode & GR_WRITE))
58647+ task->is_writable = 1;
58648+
58649+ gr_set_proc_res(task);
58650+
58651+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58652+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58653+#endif
58654+ } else {
58655+ return 1;
58656+ }
58657+
58658+ return 0;
58659+}
58660+
58661+int
58662+gr_set_acls(const int type)
58663+{
58664+ struct task_struct *task, *task2;
58665+ struct acl_role_label *role = current->role;
58666+ __u16 acl_role_id = current->acl_role_id;
58667+ const struct cred *cred;
58668+ int ret;
58669+
58670+ rcu_read_lock();
58671+ read_lock(&tasklist_lock);
58672+ read_lock(&grsec_exec_file_lock);
58673+ do_each_thread(task2, task) {
58674+ /* check to see if we're called from the exit handler,
58675+ if so, only replace ACLs that have inherited the admin
58676+ ACL */
58677+
58678+ if (type && (task->role != role ||
58679+ task->acl_role_id != acl_role_id))
58680+ continue;
58681+
58682+ task->acl_role_id = 0;
58683+ task->acl_sp_role = 0;
58684+
58685+ if (task->exec_file) {
58686+ cred = __task_cred(task);
58687+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
58688+
58689+ ret = gr_apply_subject_to_task(task);
58690+ if (ret) {
58691+ read_unlock(&grsec_exec_file_lock);
58692+ read_unlock(&tasklist_lock);
58693+ rcu_read_unlock();
58694+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
58695+ return ret;
58696+ }
58697+ } else {
58698+ // it's a kernel process
58699+ task->role = kernel_role;
58700+ task->acl = kernel_role->root_label;
58701+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
58702+ task->acl->mode &= ~GR_PROCFIND;
58703+#endif
58704+ }
58705+ } while_each_thread(task2, task);
58706+ read_unlock(&grsec_exec_file_lock);
58707+ read_unlock(&tasklist_lock);
58708+ rcu_read_unlock();
58709+
58710+ return 0;
58711+}
58712+
58713+void
58714+gr_learn_resource(const struct task_struct *task,
58715+ const int res, const unsigned long wanted, const int gt)
58716+{
58717+ struct acl_subject_label *acl;
58718+ const struct cred *cred;
58719+
58720+ if (unlikely((gr_status & GR_READY) &&
58721+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
58722+ goto skip_reslog;
58723+
58724+#ifdef CONFIG_GRKERNSEC_RESLOG
58725+ gr_log_resource(task, res, wanted, gt);
58726+#endif
58727+ skip_reslog:
58728+
58729+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
58730+ return;
58731+
58732+ acl = task->acl;
58733+
58734+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
58735+ !(acl->resmask & (1 << (unsigned short) res))))
58736+ return;
58737+
58738+ if (wanted >= acl->res[res].rlim_cur) {
58739+ unsigned long res_add;
58740+
58741+ res_add = wanted;
58742+ switch (res) {
58743+ case RLIMIT_CPU:
58744+ res_add += GR_RLIM_CPU_BUMP;
58745+ break;
58746+ case RLIMIT_FSIZE:
58747+ res_add += GR_RLIM_FSIZE_BUMP;
58748+ break;
58749+ case RLIMIT_DATA:
58750+ res_add += GR_RLIM_DATA_BUMP;
58751+ break;
58752+ case RLIMIT_STACK:
58753+ res_add += GR_RLIM_STACK_BUMP;
58754+ break;
58755+ case RLIMIT_CORE:
58756+ res_add += GR_RLIM_CORE_BUMP;
58757+ break;
58758+ case RLIMIT_RSS:
58759+ res_add += GR_RLIM_RSS_BUMP;
58760+ break;
58761+ case RLIMIT_NPROC:
58762+ res_add += GR_RLIM_NPROC_BUMP;
58763+ break;
58764+ case RLIMIT_NOFILE:
58765+ res_add += GR_RLIM_NOFILE_BUMP;
58766+ break;
58767+ case RLIMIT_MEMLOCK:
58768+ res_add += GR_RLIM_MEMLOCK_BUMP;
58769+ break;
58770+ case RLIMIT_AS:
58771+ res_add += GR_RLIM_AS_BUMP;
58772+ break;
58773+ case RLIMIT_LOCKS:
58774+ res_add += GR_RLIM_LOCKS_BUMP;
58775+ break;
58776+ case RLIMIT_SIGPENDING:
58777+ res_add += GR_RLIM_SIGPENDING_BUMP;
58778+ break;
58779+ case RLIMIT_MSGQUEUE:
58780+ res_add += GR_RLIM_MSGQUEUE_BUMP;
58781+ break;
58782+ case RLIMIT_NICE:
58783+ res_add += GR_RLIM_NICE_BUMP;
58784+ break;
58785+ case RLIMIT_RTPRIO:
58786+ res_add += GR_RLIM_RTPRIO_BUMP;
58787+ break;
58788+ case RLIMIT_RTTIME:
58789+ res_add += GR_RLIM_RTTIME_BUMP;
58790+ break;
58791+ }
58792+
58793+ acl->res[res].rlim_cur = res_add;
58794+
58795+ if (wanted > acl->res[res].rlim_max)
58796+ acl->res[res].rlim_max = res_add;
58797+
58798+ /* only log the subject filename, since resource logging is supported for
58799+ single-subject learning only */
58800+ rcu_read_lock();
58801+ cred = __task_cred(task);
58802+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
58803+ task->role->roletype, cred->uid, cred->gid, acl->filename,
58804+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
58805+ "", (unsigned long) res, &task->signal->saved_ip);
58806+ rcu_read_unlock();
58807+ }
58808+
58809+ return;
58810+}
58811+
58812+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
58813+void
58814+pax_set_initial_flags(struct linux_binprm *bprm)
58815+{
58816+ struct task_struct *task = current;
58817+ struct acl_subject_label *proc;
58818+ unsigned long flags;
58819+
58820+ if (unlikely(!(gr_status & GR_READY)))
58821+ return;
58822+
58823+ flags = pax_get_flags(task);
58824+
58825+ proc = task->acl;
58826+
58827+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
58828+ flags &= ~MF_PAX_PAGEEXEC;
58829+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
58830+ flags &= ~MF_PAX_SEGMEXEC;
58831+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
58832+ flags &= ~MF_PAX_RANDMMAP;
58833+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
58834+ flags &= ~MF_PAX_EMUTRAMP;
58835+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
58836+ flags &= ~MF_PAX_MPROTECT;
58837+
58838+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
58839+ flags |= MF_PAX_PAGEEXEC;
58840+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
58841+ flags |= MF_PAX_SEGMEXEC;
58842+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
58843+ flags |= MF_PAX_RANDMMAP;
58844+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
58845+ flags |= MF_PAX_EMUTRAMP;
58846+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
58847+ flags |= MF_PAX_MPROTECT;
58848+
58849+ pax_set_flags(task, flags);
58850+
58851+ return;
58852+}
58853+#endif
58854+
58855+#ifdef CONFIG_SYSCTL
58856+/* Eric Biederman likes breaking userland ABI and every inode-based security
58857+ system to save 35kb of memory */
58858+
58859+/* we modify the passed in filename, but adjust it back before returning */
58860+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
58861+{
58862+ struct name_entry *nmatch;
58863+ char *p, *lastp = NULL;
58864+ struct acl_object_label *obj = NULL, *tmp;
58865+ struct acl_subject_label *tmpsubj;
58866+ char c = '\0';
58867+
58868+ read_lock(&gr_inode_lock);
58869+
58870+ p = name + len - 1;
58871+ do {
58872+ nmatch = lookup_name_entry(name);
58873+ if (lastp != NULL)
58874+ *lastp = c;
58875+
58876+ if (nmatch == NULL)
58877+ goto next_component;
58878+ tmpsubj = current->acl;
58879+ do {
58880+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
58881+ if (obj != NULL) {
58882+ tmp = obj->globbed;
58883+ while (tmp) {
58884+ if (!glob_match(tmp->filename, name)) {
58885+ obj = tmp;
58886+ goto found_obj;
58887+ }
58888+ tmp = tmp->next;
58889+ }
58890+ goto found_obj;
58891+ }
58892+ } while ((tmpsubj = tmpsubj->parent_subject));
58893+next_component:
58894+ /* end case */
58895+ if (p == name)
58896+ break;
58897+
58898+ while (*p != '/')
58899+ p--;
58900+ if (p == name)
58901+ lastp = p + 1;
58902+ else {
58903+ lastp = p;
58904+ p--;
58905+ }
58906+ c = *lastp;
58907+ *lastp = '\0';
58908+ } while (1);
58909+found_obj:
58910+ read_unlock(&gr_inode_lock);
58911+ /* obj returned will always be non-null */
58912+ return obj;
58913+}
58914+
58915+/* returns 0 when allowing, non-zero on error
58916+ op of 0 is used for readdir, so we don't log the names of hidden files
58917+*/
58918+__u32
58919+gr_handle_sysctl(const struct ctl_table *table, const int op)
58920+{
58921+ ctl_table *tmp;
58922+ const char *proc_sys = "/proc/sys";
58923+ char *path;
58924+ struct acl_object_label *obj;
58925+ unsigned short len = 0, pos = 0, depth = 0, i;
58926+ __u32 err = 0;
58927+ __u32 mode = 0;
58928+
58929+ if (unlikely(!(gr_status & GR_READY)))
58930+ return 0;
58931+
58932+ /* for now, ignore operations on non-sysctl entries if it's not a
58933+ readdir*/
58934+ if (table->child != NULL && op != 0)
58935+ return 0;
58936+
58937+ mode |= GR_FIND;
58938+ /* it's only a read if it's an entry, read on dirs is for readdir */
58939+ if (op & MAY_READ)
58940+ mode |= GR_READ;
58941+ if (op & MAY_WRITE)
58942+ mode |= GR_WRITE;
58943+
58944+ preempt_disable();
58945+
58946+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58947+
58948+ /* it's only a read/write if it's an actual entry, not a dir
58949+ (which are opened for readdir)
58950+ */
58951+
58952+ /* convert the requested sysctl entry into a pathname */
58953+
58954+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58955+ len += strlen(tmp->procname);
58956+ len++;
58957+ depth++;
58958+ }
58959+
58960+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
58961+ /* deny */
58962+ goto out;
58963+ }
58964+
58965+ memset(path, 0, PAGE_SIZE);
58966+
58967+ memcpy(path, proc_sys, strlen(proc_sys));
58968+
58969+ pos += strlen(proc_sys);
58970+
58971+ for (; depth > 0; depth--) {
58972+ path[pos] = '/';
58973+ pos++;
58974+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58975+ if (depth == i) {
58976+ memcpy(path + pos, tmp->procname,
58977+ strlen(tmp->procname));
58978+ pos += strlen(tmp->procname);
58979+ }
58980+ i++;
58981+ }
58982+ }
58983+
58984+ obj = gr_lookup_by_name(path, pos);
58985+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
58986+
58987+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
58988+ ((err & mode) != mode))) {
58989+ __u32 new_mode = mode;
58990+
58991+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58992+
58993+ err = 0;
58994+ gr_log_learn_sysctl(path, new_mode);
58995+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
58996+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
58997+ err = -ENOENT;
58998+ } else if (!(err & GR_FIND)) {
58999+ err = -ENOENT;
59000+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59001+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59002+ path, (mode & GR_READ) ? " reading" : "",
59003+ (mode & GR_WRITE) ? " writing" : "");
59004+ err = -EACCES;
59005+ } else if ((err & mode) != mode) {
59006+ err = -EACCES;
59007+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59008+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59009+ path, (mode & GR_READ) ? " reading" : "",
59010+ (mode & GR_WRITE) ? " writing" : "");
59011+ err = 0;
59012+ } else
59013+ err = 0;
59014+
59015+ out:
59016+ preempt_enable();
59017+
59018+ return err;
59019+}
59020+#endif
59021+
59022+int
59023+gr_handle_proc_ptrace(struct task_struct *task)
59024+{
59025+ struct file *filp;
59026+ struct task_struct *tmp = task;
59027+ struct task_struct *curtemp = current;
59028+ __u32 retmode;
59029+
59030+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59031+ if (unlikely(!(gr_status & GR_READY)))
59032+ return 0;
59033+#endif
59034+
59035+ read_lock(&tasklist_lock);
59036+ read_lock(&grsec_exec_file_lock);
59037+ filp = task->exec_file;
59038+
59039+ while (tmp->pid > 0) {
59040+ if (tmp == curtemp)
59041+ break;
59042+ tmp = tmp->real_parent;
59043+ }
59044+
59045+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59046+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59047+ read_unlock(&grsec_exec_file_lock);
59048+ read_unlock(&tasklist_lock);
59049+ return 1;
59050+ }
59051+
59052+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59053+ if (!(gr_status & GR_READY)) {
59054+ read_unlock(&grsec_exec_file_lock);
59055+ read_unlock(&tasklist_lock);
59056+ return 0;
59057+ }
59058+#endif
59059+
59060+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59061+ read_unlock(&grsec_exec_file_lock);
59062+ read_unlock(&tasklist_lock);
59063+
59064+ if (retmode & GR_NOPTRACE)
59065+ return 1;
59066+
59067+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59068+ && (current->acl != task->acl || (current->acl != current->role->root_label
59069+ && current->pid != task->pid)))
59070+ return 1;
59071+
59072+ return 0;
59073+}
59074+
59075+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59076+{
59077+ if (unlikely(!(gr_status & GR_READY)))
59078+ return;
59079+
59080+ if (!(current->role->roletype & GR_ROLE_GOD))
59081+ return;
59082+
59083+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59084+ p->role->rolename, gr_task_roletype_to_char(p),
59085+ p->acl->filename);
59086+}
59087+
59088+int
59089+gr_handle_ptrace(struct task_struct *task, const long request)
59090+{
59091+ struct task_struct *tmp = task;
59092+ struct task_struct *curtemp = current;
59093+ __u32 retmode;
59094+
59095+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59096+ if (unlikely(!(gr_status & GR_READY)))
59097+ return 0;
59098+#endif
59099+
59100+ read_lock(&tasklist_lock);
59101+ while (tmp->pid > 0) {
59102+ if (tmp == curtemp)
59103+ break;
59104+ tmp = tmp->real_parent;
59105+ }
59106+
59107+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59108+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59109+ read_unlock(&tasklist_lock);
59110+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59111+ return 1;
59112+ }
59113+ read_unlock(&tasklist_lock);
59114+
59115+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59116+ if (!(gr_status & GR_READY))
59117+ return 0;
59118+#endif
59119+
59120+ read_lock(&grsec_exec_file_lock);
59121+ if (unlikely(!task->exec_file)) {
59122+ read_unlock(&grsec_exec_file_lock);
59123+ return 0;
59124+ }
59125+
59126+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59127+ read_unlock(&grsec_exec_file_lock);
59128+
59129+ if (retmode & GR_NOPTRACE) {
59130+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59131+ return 1;
59132+ }
59133+
59134+ if (retmode & GR_PTRACERD) {
59135+ switch (request) {
59136+ case PTRACE_POKETEXT:
59137+ case PTRACE_POKEDATA:
59138+ case PTRACE_POKEUSR:
59139+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59140+ case PTRACE_SETREGS:
59141+ case PTRACE_SETFPREGS:
59142+#endif
59143+#ifdef CONFIG_X86
59144+ case PTRACE_SETFPXREGS:
59145+#endif
59146+#ifdef CONFIG_ALTIVEC
59147+ case PTRACE_SETVRREGS:
59148+#endif
59149+ return 1;
59150+ default:
59151+ return 0;
59152+ }
59153+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
59154+ !(current->role->roletype & GR_ROLE_GOD) &&
59155+ (current->acl != task->acl)) {
59156+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59157+ return 1;
59158+ }
59159+
59160+ return 0;
59161+}
59162+
59163+static int is_writable_mmap(const struct file *filp)
59164+{
59165+ struct task_struct *task = current;
59166+ struct acl_object_label *obj, *obj2;
59167+
59168+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59169+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59170+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59171+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59172+ task->role->root_label);
59173+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59174+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59175+ return 1;
59176+ }
59177+ }
59178+ return 0;
59179+}
59180+
59181+int
59182+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59183+{
59184+ __u32 mode;
59185+
59186+ if (unlikely(!file || !(prot & PROT_EXEC)))
59187+ return 1;
59188+
59189+ if (is_writable_mmap(file))
59190+ return 0;
59191+
59192+ mode =
59193+ gr_search_file(file->f_path.dentry,
59194+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59195+ file->f_path.mnt);
59196+
59197+ if (!gr_tpe_allow(file))
59198+ return 0;
59199+
59200+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59201+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59202+ return 0;
59203+ } else if (unlikely(!(mode & GR_EXEC))) {
59204+ return 0;
59205+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59206+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59207+ return 1;
59208+ }
59209+
59210+ return 1;
59211+}
59212+
59213+int
59214+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59215+{
59216+ __u32 mode;
59217+
59218+ if (unlikely(!file || !(prot & PROT_EXEC)))
59219+ return 1;
59220+
59221+ if (is_writable_mmap(file))
59222+ return 0;
59223+
59224+ mode =
59225+ gr_search_file(file->f_path.dentry,
59226+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59227+ file->f_path.mnt);
59228+
59229+ if (!gr_tpe_allow(file))
59230+ return 0;
59231+
59232+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59233+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59234+ return 0;
59235+ } else if (unlikely(!(mode & GR_EXEC))) {
59236+ return 0;
59237+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59238+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59239+ return 1;
59240+ }
59241+
59242+ return 1;
59243+}
59244+
59245+void
59246+gr_acl_handle_psacct(struct task_struct *task, const long code)
59247+{
59248+ unsigned long runtime;
59249+ unsigned long cputime;
59250+ unsigned int wday, cday;
59251+ __u8 whr, chr;
59252+ __u8 wmin, cmin;
59253+ __u8 wsec, csec;
59254+ struct timespec timeval;
59255+
59256+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
59257+ !(task->acl->mode & GR_PROCACCT)))
59258+ return;
59259+
59260+ do_posix_clock_monotonic_gettime(&timeval);
59261+ runtime = timeval.tv_sec - task->start_time.tv_sec;
59262+ wday = runtime / (3600 * 24);
59263+ runtime -= wday * (3600 * 24);
59264+ whr = runtime / 3600;
59265+ runtime -= whr * 3600;
59266+ wmin = runtime / 60;
59267+ runtime -= wmin * 60;
59268+ wsec = runtime;
59269+
59270+ cputime = (task->utime + task->stime) / HZ;
59271+ cday = cputime / (3600 * 24);
59272+ cputime -= cday * (3600 * 24);
59273+ chr = cputime / 3600;
59274+ cputime -= chr * 3600;
59275+ cmin = cputime / 60;
59276+ cputime -= cmin * 60;
59277+ csec = cputime;
59278+
59279+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
59280+
59281+ return;
59282+}
59283+
59284+void gr_set_kernel_label(struct task_struct *task)
59285+{
59286+ if (gr_status & GR_READY) {
59287+ task->role = kernel_role;
59288+ task->acl = kernel_role->root_label;
59289+ }
59290+ return;
59291+}
59292+
59293+#ifdef CONFIG_TASKSTATS
59294+int gr_is_taskstats_denied(int pid)
59295+{
59296+ struct task_struct *task;
59297+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59298+ const struct cred *cred;
59299+#endif
59300+ int ret = 0;
59301+
59302+ /* restrict taskstats viewing to un-chrooted root users
59303+ who have the 'view' subject flag if the RBAC system is enabled
59304+ */
59305+
59306+ rcu_read_lock();
59307+ read_lock(&tasklist_lock);
59308+ task = find_task_by_vpid(pid);
59309+ if (task) {
59310+#ifdef CONFIG_GRKERNSEC_CHROOT
59311+ if (proc_is_chrooted(task))
59312+ ret = -EACCES;
59313+#endif
59314+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59315+ cred = __task_cred(task);
59316+#ifdef CONFIG_GRKERNSEC_PROC_USER
59317+ if (cred->uid != 0)
59318+ ret = -EACCES;
59319+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59320+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
59321+ ret = -EACCES;
59322+#endif
59323+#endif
59324+ if (gr_status & GR_READY) {
59325+ if (!(task->acl->mode & GR_VIEW))
59326+ ret = -EACCES;
59327+ }
59328+ } else
59329+ ret = -ENOENT;
59330+
59331+ read_unlock(&tasklist_lock);
59332+ rcu_read_unlock();
59333+
59334+ return ret;
59335+}
59336+#endif
59337+
59338+/* AUXV entries are filled via a descendant of search_binary_handler
59339+ after we've already applied the subject for the target
59340+*/
59341+int gr_acl_enable_at_secure(void)
59342+{
59343+ if (unlikely(!(gr_status & GR_READY)))
59344+ return 0;
59345+
59346+ if (current->acl->mode & GR_ATSECURE)
59347+ return 1;
59348+
59349+ return 0;
59350+}
59351+
59352+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
59353+{
59354+ struct task_struct *task = current;
59355+ struct dentry *dentry = file->f_path.dentry;
59356+ struct vfsmount *mnt = file->f_path.mnt;
59357+ struct acl_object_label *obj, *tmp;
59358+ struct acl_subject_label *subj;
59359+ unsigned int bufsize;
59360+ int is_not_root;
59361+ char *path;
59362+ dev_t dev = __get_dev(dentry);
59363+
59364+ if (unlikely(!(gr_status & GR_READY)))
59365+ return 1;
59366+
59367+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59368+ return 1;
59369+
59370+ /* ignore Eric Biederman */
59371+ if (IS_PRIVATE(dentry->d_inode))
59372+ return 1;
59373+
59374+ subj = task->acl;
59375+ do {
59376+ obj = lookup_acl_obj_label(ino, dev, subj);
59377+ if (obj != NULL)
59378+ return (obj->mode & GR_FIND) ? 1 : 0;
59379+ } while ((subj = subj->parent_subject));
59380+
59381+ /* this is purely an optimization since we're looking for an object
59382+ for the directory we're doing a readdir on
59383+ if it's possible for any globbed object to match the entry we're
59384+ filling into the directory, then the object we find here will be
59385+ an anchor point with attached globbed objects
59386+ */
59387+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
59388+ if (obj->globbed == NULL)
59389+ return (obj->mode & GR_FIND) ? 1 : 0;
59390+
59391+ is_not_root = ((obj->filename[0] == '/') &&
59392+ (obj->filename[1] == '\0')) ? 0 : 1;
59393+ bufsize = PAGE_SIZE - namelen - is_not_root;
59394+
59395+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
59396+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
59397+ return 1;
59398+
59399+ preempt_disable();
59400+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
59401+ bufsize);
59402+
59403+ bufsize = strlen(path);
59404+
59405+ /* if base is "/", don't append an additional slash */
59406+ if (is_not_root)
59407+ *(path + bufsize) = '/';
59408+ memcpy(path + bufsize + is_not_root, name, namelen);
59409+ *(path + bufsize + namelen + is_not_root) = '\0';
59410+
59411+ tmp = obj->globbed;
59412+ while (tmp) {
59413+ if (!glob_match(tmp->filename, path)) {
59414+ preempt_enable();
59415+ return (tmp->mode & GR_FIND) ? 1 : 0;
59416+ }
59417+ tmp = tmp->next;
59418+ }
59419+ preempt_enable();
59420+ return (obj->mode & GR_FIND) ? 1 : 0;
59421+}
59422+
59423+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
59424+EXPORT_SYMBOL(gr_acl_is_enabled);
59425+#endif
59426+EXPORT_SYMBOL(gr_learn_resource);
59427+EXPORT_SYMBOL(gr_set_kernel_label);
59428+#ifdef CONFIG_SECURITY
59429+EXPORT_SYMBOL(gr_check_user_change);
59430+EXPORT_SYMBOL(gr_check_group_change);
59431+#endif
59432+
59433diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
59434new file mode 100644
59435index 0000000..34fefda
59436--- /dev/null
59437+++ b/grsecurity/gracl_alloc.c
59438@@ -0,0 +1,105 @@
59439+#include <linux/kernel.h>
59440+#include <linux/mm.h>
59441+#include <linux/slab.h>
59442+#include <linux/vmalloc.h>
59443+#include <linux/gracl.h>
59444+#include <linux/grsecurity.h>
59445+
59446+static unsigned long alloc_stack_next = 1;
59447+static unsigned long alloc_stack_size = 1;
59448+static void **alloc_stack;
59449+
59450+static __inline__ int
59451+alloc_pop(void)
59452+{
59453+ if (alloc_stack_next == 1)
59454+ return 0;
59455+
59456+ kfree(alloc_stack[alloc_stack_next - 2]);
59457+
59458+ alloc_stack_next--;
59459+
59460+ return 1;
59461+}
59462+
59463+static __inline__ int
59464+alloc_push(void *buf)
59465+{
59466+ if (alloc_stack_next >= alloc_stack_size)
59467+ return 1;
59468+
59469+ alloc_stack[alloc_stack_next - 1] = buf;
59470+
59471+ alloc_stack_next++;
59472+
59473+ return 0;
59474+}
59475+
59476+void *
59477+acl_alloc(unsigned long len)
59478+{
59479+ void *ret = NULL;
59480+
59481+ if (!len || len > PAGE_SIZE)
59482+ goto out;
59483+
59484+ ret = kmalloc(len, GFP_KERNEL);
59485+
59486+ if (ret) {
59487+ if (alloc_push(ret)) {
59488+ kfree(ret);
59489+ ret = NULL;
59490+ }
59491+ }
59492+
59493+out:
59494+ return ret;
59495+}
59496+
59497+void *
59498+acl_alloc_num(unsigned long num, unsigned long len)
59499+{
59500+ if (!len || (num > (PAGE_SIZE / len)))
59501+ return NULL;
59502+
59503+ return acl_alloc(num * len);
59504+}
59505+
59506+void
59507+acl_free_all(void)
59508+{
59509+ if (gr_acl_is_enabled() || !alloc_stack)
59510+ return;
59511+
59512+ while (alloc_pop()) ;
59513+
59514+ if (alloc_stack) {
59515+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
59516+ kfree(alloc_stack);
59517+ else
59518+ vfree(alloc_stack);
59519+ }
59520+
59521+ alloc_stack = NULL;
59522+ alloc_stack_size = 1;
59523+ alloc_stack_next = 1;
59524+
59525+ return;
59526+}
59527+
59528+int
59529+acl_alloc_stack_init(unsigned long size)
59530+{
59531+ if ((size * sizeof (void *)) <= PAGE_SIZE)
59532+ alloc_stack =
59533+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
59534+ else
59535+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
59536+
59537+ alloc_stack_size = size;
59538+
59539+ if (!alloc_stack)
59540+ return 0;
59541+ else
59542+ return 1;
59543+}
59544diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
59545new file mode 100644
59546index 0000000..955ddfb
59547--- /dev/null
59548+++ b/grsecurity/gracl_cap.c
59549@@ -0,0 +1,101 @@
59550+#include <linux/kernel.h>
59551+#include <linux/module.h>
59552+#include <linux/sched.h>
59553+#include <linux/gracl.h>
59554+#include <linux/grsecurity.h>
59555+#include <linux/grinternal.h>
59556+
59557+extern const char *captab_log[];
59558+extern int captab_log_entries;
59559+
59560+int
59561+gr_acl_is_capable(const int cap)
59562+{
59563+ struct task_struct *task = current;
59564+ const struct cred *cred = current_cred();
59565+ struct acl_subject_label *curracl;
59566+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59567+ kernel_cap_t cap_audit = __cap_empty_set;
59568+
59569+ if (!gr_acl_is_enabled())
59570+ return 1;
59571+
59572+ curracl = task->acl;
59573+
59574+ cap_drop = curracl->cap_lower;
59575+ cap_mask = curracl->cap_mask;
59576+ cap_audit = curracl->cap_invert_audit;
59577+
59578+ while ((curracl = curracl->parent_subject)) {
59579+ /* if the cap isn't specified in the current computed mask but is specified in the
59580+ current level subject, and is lowered in the current level subject, then add
59581+ it to the set of dropped capabilities
59582+ otherwise, add the current level subject's mask to the current computed mask
59583+ */
59584+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59585+ cap_raise(cap_mask, cap);
59586+ if (cap_raised(curracl->cap_lower, cap))
59587+ cap_raise(cap_drop, cap);
59588+ if (cap_raised(curracl->cap_invert_audit, cap))
59589+ cap_raise(cap_audit, cap);
59590+ }
59591+ }
59592+
59593+ if (!cap_raised(cap_drop, cap)) {
59594+ if (cap_raised(cap_audit, cap))
59595+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
59596+ return 1;
59597+ }
59598+
59599+ curracl = task->acl;
59600+
59601+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
59602+ && cap_raised(cred->cap_effective, cap)) {
59603+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59604+ task->role->roletype, cred->uid,
59605+ cred->gid, task->exec_file ?
59606+ gr_to_filename(task->exec_file->f_path.dentry,
59607+ task->exec_file->f_path.mnt) : curracl->filename,
59608+ curracl->filename, 0UL,
59609+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
59610+ return 1;
59611+ }
59612+
59613+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
59614+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
59615+ return 0;
59616+}
59617+
59618+int
59619+gr_acl_is_capable_nolog(const int cap)
59620+{
59621+ struct acl_subject_label *curracl;
59622+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59623+
59624+ if (!gr_acl_is_enabled())
59625+ return 1;
59626+
59627+ curracl = current->acl;
59628+
59629+ cap_drop = curracl->cap_lower;
59630+ cap_mask = curracl->cap_mask;
59631+
59632+ while ((curracl = curracl->parent_subject)) {
59633+ /* if the cap isn't specified in the current computed mask but is specified in the
59634+ current level subject, and is lowered in the current level subject, then add
59635+ it to the set of dropped capabilities
59636+ otherwise, add the current level subject's mask to the current computed mask
59637+ */
59638+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59639+ cap_raise(cap_mask, cap);
59640+ if (cap_raised(curracl->cap_lower, cap))
59641+ cap_raise(cap_drop, cap);
59642+ }
59643+ }
59644+
59645+ if (!cap_raised(cap_drop, cap))
59646+ return 1;
59647+
59648+ return 0;
59649+}
59650+
59651diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
59652new file mode 100644
59653index 0000000..d5f210c
59654--- /dev/null
59655+++ b/grsecurity/gracl_fs.c
59656@@ -0,0 +1,433 @@
59657+#include <linux/kernel.h>
59658+#include <linux/sched.h>
59659+#include <linux/types.h>
59660+#include <linux/fs.h>
59661+#include <linux/file.h>
59662+#include <linux/stat.h>
59663+#include <linux/grsecurity.h>
59664+#include <linux/grinternal.h>
59665+#include <linux/gracl.h>
59666+
59667+__u32
59668+gr_acl_handle_hidden_file(const struct dentry * dentry,
59669+ const struct vfsmount * mnt)
59670+{
59671+ __u32 mode;
59672+
59673+ if (unlikely(!dentry->d_inode))
59674+ return GR_FIND;
59675+
59676+ mode =
59677+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
59678+
59679+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
59680+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59681+ return mode;
59682+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
59683+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59684+ return 0;
59685+ } else if (unlikely(!(mode & GR_FIND)))
59686+ return 0;
59687+
59688+ return GR_FIND;
59689+}
59690+
59691+__u32
59692+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59693+ int acc_mode)
59694+{
59695+ __u32 reqmode = GR_FIND;
59696+ __u32 mode;
59697+
59698+ if (unlikely(!dentry->d_inode))
59699+ return reqmode;
59700+
59701+ if (acc_mode & MAY_APPEND)
59702+ reqmode |= GR_APPEND;
59703+ else if (acc_mode & MAY_WRITE)
59704+ reqmode |= GR_WRITE;
59705+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
59706+ reqmode |= GR_READ;
59707+
59708+ mode =
59709+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
59710+ mnt);
59711+
59712+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59713+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
59714+ reqmode & GR_READ ? " reading" : "",
59715+ reqmode & GR_WRITE ? " writing" : reqmode &
59716+ GR_APPEND ? " appending" : "");
59717+ return reqmode;
59718+ } else
59719+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59720+ {
59721+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
59722+ reqmode & GR_READ ? " reading" : "",
59723+ reqmode & GR_WRITE ? " writing" : reqmode &
59724+ GR_APPEND ? " appending" : "");
59725+ return 0;
59726+ } else if (unlikely((mode & reqmode) != reqmode))
59727+ return 0;
59728+
59729+ return reqmode;
59730+}
59731+
59732+__u32
59733+gr_acl_handle_creat(const struct dentry * dentry,
59734+ const struct dentry * p_dentry,
59735+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
59736+ const int imode)
59737+{
59738+ __u32 reqmode = GR_WRITE | GR_CREATE;
59739+ __u32 mode;
59740+
59741+ if (acc_mode & MAY_APPEND)
59742+ reqmode |= GR_APPEND;
59743+ // if a directory was required or the directory already exists, then
59744+ // don't count this open as a read
59745+ if ((acc_mode & MAY_READ) &&
59746+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
59747+ reqmode |= GR_READ;
59748+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
59749+ reqmode |= GR_SETID;
59750+
59751+ mode =
59752+ gr_check_create(dentry, p_dentry, p_mnt,
59753+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
59754+
59755+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59756+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
59757+ reqmode & GR_READ ? " reading" : "",
59758+ reqmode & GR_WRITE ? " writing" : reqmode &
59759+ GR_APPEND ? " appending" : "");
59760+ return reqmode;
59761+ } else
59762+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59763+ {
59764+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
59765+ reqmode & GR_READ ? " reading" : "",
59766+ reqmode & GR_WRITE ? " writing" : reqmode &
59767+ GR_APPEND ? " appending" : "");
59768+ return 0;
59769+ } else if (unlikely((mode & reqmode) != reqmode))
59770+ return 0;
59771+
59772+ return reqmode;
59773+}
59774+
59775+__u32
59776+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
59777+ const int fmode)
59778+{
59779+ __u32 mode, reqmode = GR_FIND;
59780+
59781+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
59782+ reqmode |= GR_EXEC;
59783+ if (fmode & S_IWOTH)
59784+ reqmode |= GR_WRITE;
59785+ if (fmode & S_IROTH)
59786+ reqmode |= GR_READ;
59787+
59788+ mode =
59789+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
59790+ mnt);
59791+
59792+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59793+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
59794+ reqmode & GR_READ ? " reading" : "",
59795+ reqmode & GR_WRITE ? " writing" : "",
59796+ reqmode & GR_EXEC ? " executing" : "");
59797+ return reqmode;
59798+ } else
59799+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59800+ {
59801+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
59802+ reqmode & GR_READ ? " reading" : "",
59803+ reqmode & GR_WRITE ? " writing" : "",
59804+ reqmode & GR_EXEC ? " executing" : "");
59805+ return 0;
59806+ } else if (unlikely((mode & reqmode) != reqmode))
59807+ return 0;
59808+
59809+ return reqmode;
59810+}
59811+
59812+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
59813+{
59814+ __u32 mode;
59815+
59816+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
59817+
59818+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
59819+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
59820+ return mode;
59821+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
59822+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
59823+ return 0;
59824+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
59825+ return 0;
59826+
59827+ return (reqmode);
59828+}
59829+
59830+__u32
59831+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
59832+{
59833+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
59834+}
59835+
59836+__u32
59837+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
59838+{
59839+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
59840+}
59841+
59842+__u32
59843+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
59844+{
59845+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
59846+}
59847+
59848+__u32
59849+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
59850+{
59851+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
59852+}
59853+
59854+__u32
59855+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
59856+ mode_t mode)
59857+{
59858+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
59859+ return 1;
59860+
59861+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
59862+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
59863+ GR_FCHMOD_ACL_MSG);
59864+ } else {
59865+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
59866+ }
59867+}
59868+
59869+__u32
59870+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
59871+ mode_t mode)
59872+{
59873+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
59874+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
59875+ GR_CHMOD_ACL_MSG);
59876+ } else {
59877+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
59878+ }
59879+}
59880+
59881+__u32
59882+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
59883+{
59884+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
59885+}
59886+
59887+__u32
59888+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
59889+{
59890+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
59891+}
59892+
59893+__u32
59894+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
59895+{
59896+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
59897+}
59898+
59899+__u32
59900+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
59901+{
59902+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
59903+ GR_UNIXCONNECT_ACL_MSG);
59904+}
59905+
59906+/* hardlinks require at minimum create and link permission,
59907+ any additional privilege required is based on the
59908+ privilege of the file being linked to
59909+*/
59910+__u32
59911+gr_acl_handle_link(const struct dentry * new_dentry,
59912+ const struct dentry * parent_dentry,
59913+ const struct vfsmount * parent_mnt,
59914+ const struct dentry * old_dentry,
59915+ const struct vfsmount * old_mnt, const char *to)
59916+{
59917+ __u32 mode;
59918+ __u32 needmode = GR_CREATE | GR_LINK;
59919+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
59920+
59921+ mode =
59922+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
59923+ old_mnt);
59924+
59925+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
59926+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
59927+ return mode;
59928+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
59929+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
59930+ return 0;
59931+ } else if (unlikely((mode & needmode) != needmode))
59932+ return 0;
59933+
59934+ return 1;
59935+}
59936+
59937+__u32
59938+gr_acl_handle_symlink(const struct dentry * new_dentry,
59939+ const struct dentry * parent_dentry,
59940+ const struct vfsmount * parent_mnt, const char *from)
59941+{
59942+ __u32 needmode = GR_WRITE | GR_CREATE;
59943+ __u32 mode;
59944+
59945+ mode =
59946+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
59947+ GR_CREATE | GR_AUDIT_CREATE |
59948+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
59949+
59950+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
59951+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
59952+ return mode;
59953+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
59954+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
59955+ return 0;
59956+ } else if (unlikely((mode & needmode) != needmode))
59957+ return 0;
59958+
59959+ return (GR_WRITE | GR_CREATE);
59960+}
59961+
59962+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
59963+{
59964+ __u32 mode;
59965+
59966+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
59967+
59968+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
59969+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
59970+ return mode;
59971+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
59972+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
59973+ return 0;
59974+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
59975+ return 0;
59976+
59977+ return (reqmode);
59978+}
59979+
59980+__u32
59981+gr_acl_handle_mknod(const struct dentry * new_dentry,
59982+ const struct dentry * parent_dentry,
59983+ const struct vfsmount * parent_mnt,
59984+ const int mode)
59985+{
59986+ __u32 reqmode = GR_WRITE | GR_CREATE;
59987+ if (unlikely(mode & (S_ISUID | S_ISGID)))
59988+ reqmode |= GR_SETID;
59989+
59990+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
59991+ reqmode, GR_MKNOD_ACL_MSG);
59992+}
59993+
59994+__u32
59995+gr_acl_handle_mkdir(const struct dentry *new_dentry,
59996+ const struct dentry *parent_dentry,
59997+ const struct vfsmount *parent_mnt)
59998+{
59999+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60000+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60001+}
60002+
60003+#define RENAME_CHECK_SUCCESS(old, new) \
60004+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60005+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60006+
60007+int
60008+gr_acl_handle_rename(struct dentry *new_dentry,
60009+ struct dentry *parent_dentry,
60010+ const struct vfsmount *parent_mnt,
60011+ struct dentry *old_dentry,
60012+ struct inode *old_parent_inode,
60013+ struct vfsmount *old_mnt, const char *newname)
60014+{
60015+ __u32 comp1, comp2;
60016+ int error = 0;
60017+
60018+ if (unlikely(!gr_acl_is_enabled()))
60019+ return 0;
60020+
60021+ if (!new_dentry->d_inode) {
60022+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60023+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60024+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60025+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60026+ GR_DELETE | GR_AUDIT_DELETE |
60027+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60028+ GR_SUPPRESS, old_mnt);
60029+ } else {
60030+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60031+ GR_CREATE | GR_DELETE |
60032+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60033+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60034+ GR_SUPPRESS, parent_mnt);
60035+ comp2 =
60036+ gr_search_file(old_dentry,
60037+ GR_READ | GR_WRITE | GR_AUDIT_READ |
60038+ GR_DELETE | GR_AUDIT_DELETE |
60039+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60040+ }
60041+
60042+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60043+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60044+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60045+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60046+ && !(comp2 & GR_SUPPRESS)) {
60047+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60048+ error = -EACCES;
60049+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60050+ error = -EACCES;
60051+
60052+ return error;
60053+}
60054+
60055+void
60056+gr_acl_handle_exit(void)
60057+{
60058+ u16 id;
60059+ char *rolename;
60060+ struct file *exec_file;
60061+
60062+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60063+ !(current->role->roletype & GR_ROLE_PERSIST))) {
60064+ id = current->acl_role_id;
60065+ rolename = current->role->rolename;
60066+ gr_set_acls(1);
60067+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60068+ }
60069+
60070+ write_lock(&grsec_exec_file_lock);
60071+ exec_file = current->exec_file;
60072+ current->exec_file = NULL;
60073+ write_unlock(&grsec_exec_file_lock);
60074+
60075+ if (exec_file)
60076+ fput(exec_file);
60077+}
60078+
60079+int
60080+gr_acl_handle_procpidmem(const struct task_struct *task)
60081+{
60082+ if (unlikely(!gr_acl_is_enabled()))
60083+ return 0;
60084+
60085+ if (task != current && task->acl->mode & GR_PROTPROCFD)
60086+ return -EACCES;
60087+
60088+ return 0;
60089+}
60090diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60091new file mode 100644
60092index 0000000..cd07b96
60093--- /dev/null
60094+++ b/grsecurity/gracl_ip.c
60095@@ -0,0 +1,382 @@
60096+#include <linux/kernel.h>
60097+#include <asm/uaccess.h>
60098+#include <asm/errno.h>
60099+#include <net/sock.h>
60100+#include <linux/file.h>
60101+#include <linux/fs.h>
60102+#include <linux/net.h>
60103+#include <linux/in.h>
60104+#include <linux/skbuff.h>
60105+#include <linux/ip.h>
60106+#include <linux/udp.h>
60107+#include <linux/smp_lock.h>
60108+#include <linux/types.h>
60109+#include <linux/sched.h>
60110+#include <linux/netdevice.h>
60111+#include <linux/inetdevice.h>
60112+#include <linux/gracl.h>
60113+#include <linux/grsecurity.h>
60114+#include <linux/grinternal.h>
60115+
60116+#define GR_BIND 0x01
60117+#define GR_CONNECT 0x02
60118+#define GR_INVERT 0x04
60119+#define GR_BINDOVERRIDE 0x08
60120+#define GR_CONNECTOVERRIDE 0x10
60121+#define GR_SOCK_FAMILY 0x20
60122+
60123+static const char * gr_protocols[IPPROTO_MAX] = {
60124+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60125+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60126+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60127+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60128+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60129+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60130+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60131+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60132+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60133+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60134+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60135+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60136+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60137+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60138+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60139+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60140+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60141+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60142+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60143+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60144+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60145+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60146+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60147+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60148+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60149+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60150+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60151+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60152+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60153+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60154+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60155+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60156+ };
60157+
60158+static const char * gr_socktypes[SOCK_MAX] = {
60159+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60160+ "unknown:7", "unknown:8", "unknown:9", "packet"
60161+ };
60162+
60163+static const char * gr_sockfamilies[AF_MAX+1] = {
60164+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60165+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60166+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60167+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60168+ };
60169+
60170+const char *
60171+gr_proto_to_name(unsigned char proto)
60172+{
60173+ return gr_protocols[proto];
60174+}
60175+
60176+const char *
60177+gr_socktype_to_name(unsigned char type)
60178+{
60179+ return gr_socktypes[type];
60180+}
60181+
60182+const char *
60183+gr_sockfamily_to_name(unsigned char family)
60184+{
60185+ return gr_sockfamilies[family];
60186+}
60187+
60188+int
60189+gr_search_socket(const int domain, const int type, const int protocol)
60190+{
60191+ struct acl_subject_label *curr;
60192+ const struct cred *cred = current_cred();
60193+
60194+ if (unlikely(!gr_acl_is_enabled()))
60195+ goto exit;
60196+
60197+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
60198+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60199+ goto exit; // let the kernel handle it
60200+
60201+ curr = current->acl;
60202+
60203+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60204+ /* the family is allowed, if this is PF_INET allow it only if
60205+ the extra sock type/protocol checks pass */
60206+ if (domain == PF_INET)
60207+ goto inet_check;
60208+ goto exit;
60209+ } else {
60210+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60211+ __u32 fakeip = 0;
60212+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60213+ current->role->roletype, cred->uid,
60214+ cred->gid, current->exec_file ?
60215+ gr_to_filename(current->exec_file->f_path.dentry,
60216+ current->exec_file->f_path.mnt) :
60217+ curr->filename, curr->filename,
60218+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60219+ &current->signal->saved_ip);
60220+ goto exit;
60221+ }
60222+ goto exit_fail;
60223+ }
60224+
60225+inet_check:
60226+ /* the rest of this checking is for IPv4 only */
60227+ if (!curr->ips)
60228+ goto exit;
60229+
60230+ if ((curr->ip_type & (1 << type)) &&
60231+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
60232+ goto exit;
60233+
60234+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60235+ /* we don't place acls on raw sockets , and sometimes
60236+ dgram/ip sockets are opened for ioctl and not
60237+ bind/connect, so we'll fake a bind learn log */
60238+ if (type == SOCK_RAW || type == SOCK_PACKET) {
60239+ __u32 fakeip = 0;
60240+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60241+ current->role->roletype, cred->uid,
60242+ cred->gid, current->exec_file ?
60243+ gr_to_filename(current->exec_file->f_path.dentry,
60244+ current->exec_file->f_path.mnt) :
60245+ curr->filename, curr->filename,
60246+ &fakeip, 0, type,
60247+ protocol, GR_CONNECT, &current->signal->saved_ip);
60248+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
60249+ __u32 fakeip = 0;
60250+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60251+ current->role->roletype, cred->uid,
60252+ cred->gid, current->exec_file ?
60253+ gr_to_filename(current->exec_file->f_path.dentry,
60254+ current->exec_file->f_path.mnt) :
60255+ curr->filename, curr->filename,
60256+ &fakeip, 0, type,
60257+ protocol, GR_BIND, &current->signal->saved_ip);
60258+ }
60259+ /* we'll log when they use connect or bind */
60260+ goto exit;
60261+ }
60262+
60263+exit_fail:
60264+ if (domain == PF_INET)
60265+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
60266+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
60267+ else
60268+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
60269+ gr_socktype_to_name(type), protocol);
60270+
60271+ return 0;
60272+exit:
60273+ return 1;
60274+}
60275+
60276+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
60277+{
60278+ if ((ip->mode & mode) &&
60279+ (ip_port >= ip->low) &&
60280+ (ip_port <= ip->high) &&
60281+ ((ntohl(ip_addr) & our_netmask) ==
60282+ (ntohl(our_addr) & our_netmask))
60283+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
60284+ && (ip->type & (1 << type))) {
60285+ if (ip->mode & GR_INVERT)
60286+ return 2; // specifically denied
60287+ else
60288+ return 1; // allowed
60289+ }
60290+
60291+ return 0; // not specifically allowed, may continue parsing
60292+}
60293+
60294+static int
60295+gr_search_connectbind(const int full_mode, struct sock *sk,
60296+ struct sockaddr_in *addr, const int type)
60297+{
60298+ char iface[IFNAMSIZ] = {0};
60299+ struct acl_subject_label *curr;
60300+ struct acl_ip_label *ip;
60301+ struct inet_sock *isk;
60302+ struct net_device *dev;
60303+ struct in_device *idev;
60304+ unsigned long i;
60305+ int ret;
60306+ int mode = full_mode & (GR_BIND | GR_CONNECT);
60307+ __u32 ip_addr = 0;
60308+ __u32 our_addr;
60309+ __u32 our_netmask;
60310+ char *p;
60311+ __u16 ip_port = 0;
60312+ const struct cred *cred = current_cred();
60313+
60314+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
60315+ return 0;
60316+
60317+ curr = current->acl;
60318+ isk = inet_sk(sk);
60319+
60320+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
60321+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
60322+ addr->sin_addr.s_addr = curr->inaddr_any_override;
60323+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
60324+ struct sockaddr_in saddr;
60325+ int err;
60326+
60327+ saddr.sin_family = AF_INET;
60328+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
60329+ saddr.sin_port = isk->sport;
60330+
60331+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60332+ if (err)
60333+ return err;
60334+
60335+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60336+ if (err)
60337+ return err;
60338+ }
60339+
60340+ if (!curr->ips)
60341+ return 0;
60342+
60343+ ip_addr = addr->sin_addr.s_addr;
60344+ ip_port = ntohs(addr->sin_port);
60345+
60346+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60347+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60348+ current->role->roletype, cred->uid,
60349+ cred->gid, current->exec_file ?
60350+ gr_to_filename(current->exec_file->f_path.dentry,
60351+ current->exec_file->f_path.mnt) :
60352+ curr->filename, curr->filename,
60353+ &ip_addr, ip_port, type,
60354+ sk->sk_protocol, mode, &current->signal->saved_ip);
60355+ return 0;
60356+ }
60357+
60358+ for (i = 0; i < curr->ip_num; i++) {
60359+ ip = *(curr->ips + i);
60360+ if (ip->iface != NULL) {
60361+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
60362+ p = strchr(iface, ':');
60363+ if (p != NULL)
60364+ *p = '\0';
60365+ dev = dev_get_by_name(sock_net(sk), iface);
60366+ if (dev == NULL)
60367+ continue;
60368+ idev = in_dev_get(dev);
60369+ if (idev == NULL) {
60370+ dev_put(dev);
60371+ continue;
60372+ }
60373+ rcu_read_lock();
60374+ for_ifa(idev) {
60375+ if (!strcmp(ip->iface, ifa->ifa_label)) {
60376+ our_addr = ifa->ifa_address;
60377+ our_netmask = 0xffffffff;
60378+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60379+ if (ret == 1) {
60380+ rcu_read_unlock();
60381+ in_dev_put(idev);
60382+ dev_put(dev);
60383+ return 0;
60384+ } else if (ret == 2) {
60385+ rcu_read_unlock();
60386+ in_dev_put(idev);
60387+ dev_put(dev);
60388+ goto denied;
60389+ }
60390+ }
60391+ } endfor_ifa(idev);
60392+ rcu_read_unlock();
60393+ in_dev_put(idev);
60394+ dev_put(dev);
60395+ } else {
60396+ our_addr = ip->addr;
60397+ our_netmask = ip->netmask;
60398+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60399+ if (ret == 1)
60400+ return 0;
60401+ else if (ret == 2)
60402+ goto denied;
60403+ }
60404+ }
60405+
60406+denied:
60407+ if (mode == GR_BIND)
60408+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60409+ else if (mode == GR_CONNECT)
60410+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60411+
60412+ return -EACCES;
60413+}
60414+
60415+int
60416+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
60417+{
60418+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
60419+}
60420+
60421+int
60422+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
60423+{
60424+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
60425+}
60426+
60427+int gr_search_listen(struct socket *sock)
60428+{
60429+ struct sock *sk = sock->sk;
60430+ struct sockaddr_in addr;
60431+
60432+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60433+ addr.sin_port = inet_sk(sk)->sport;
60434+
60435+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60436+}
60437+
60438+int gr_search_accept(struct socket *sock)
60439+{
60440+ struct sock *sk = sock->sk;
60441+ struct sockaddr_in addr;
60442+
60443+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60444+ addr.sin_port = inet_sk(sk)->sport;
60445+
60446+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60447+}
60448+
60449+int
60450+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
60451+{
60452+ if (addr)
60453+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
60454+ else {
60455+ struct sockaddr_in sin;
60456+ const struct inet_sock *inet = inet_sk(sk);
60457+
60458+ sin.sin_addr.s_addr = inet->daddr;
60459+ sin.sin_port = inet->dport;
60460+
60461+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60462+ }
60463+}
60464+
60465+int
60466+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
60467+{
60468+ struct sockaddr_in sin;
60469+
60470+ if (unlikely(skb->len < sizeof (struct udphdr)))
60471+ return 0; // skip this packet
60472+
60473+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
60474+ sin.sin_port = udp_hdr(skb)->source;
60475+
60476+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60477+}
60478diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
60479new file mode 100644
60480index 0000000..34bdd46
60481--- /dev/null
60482+++ b/grsecurity/gracl_learn.c
60483@@ -0,0 +1,208 @@
60484+#include <linux/kernel.h>
60485+#include <linux/mm.h>
60486+#include <linux/sched.h>
60487+#include <linux/poll.h>
60488+#include <linux/smp_lock.h>
60489+#include <linux/string.h>
60490+#include <linux/file.h>
60491+#include <linux/types.h>
60492+#include <linux/vmalloc.h>
60493+#include <linux/grinternal.h>
60494+
60495+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
60496+ size_t count, loff_t *ppos);
60497+extern int gr_acl_is_enabled(void);
60498+
60499+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
60500+static int gr_learn_attached;
60501+
60502+/* use a 512k buffer */
60503+#define LEARN_BUFFER_SIZE (512 * 1024)
60504+
60505+static DEFINE_SPINLOCK(gr_learn_lock);
60506+static DEFINE_MUTEX(gr_learn_user_mutex);
60507+
60508+/* we need to maintain two buffers, so that the kernel context of grlearn
60509+ uses a semaphore around the userspace copying, and the other kernel contexts
60510+ use a spinlock when copying into the buffer, since they cannot sleep
60511+*/
60512+static char *learn_buffer;
60513+static char *learn_buffer_user;
60514+static int learn_buffer_len;
60515+static int learn_buffer_user_len;
60516+
60517+static ssize_t
60518+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
60519+{
60520+ DECLARE_WAITQUEUE(wait, current);
60521+ ssize_t retval = 0;
60522+
60523+ add_wait_queue(&learn_wait, &wait);
60524+ set_current_state(TASK_INTERRUPTIBLE);
60525+ do {
60526+ mutex_lock(&gr_learn_user_mutex);
60527+ spin_lock(&gr_learn_lock);
60528+ if (learn_buffer_len)
60529+ break;
60530+ spin_unlock(&gr_learn_lock);
60531+ mutex_unlock(&gr_learn_user_mutex);
60532+ if (file->f_flags & O_NONBLOCK) {
60533+ retval = -EAGAIN;
60534+ goto out;
60535+ }
60536+ if (signal_pending(current)) {
60537+ retval = -ERESTARTSYS;
60538+ goto out;
60539+ }
60540+
60541+ schedule();
60542+ } while (1);
60543+
60544+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
60545+ learn_buffer_user_len = learn_buffer_len;
60546+ retval = learn_buffer_len;
60547+ learn_buffer_len = 0;
60548+
60549+ spin_unlock(&gr_learn_lock);
60550+
60551+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
60552+ retval = -EFAULT;
60553+
60554+ mutex_unlock(&gr_learn_user_mutex);
60555+out:
60556+ set_current_state(TASK_RUNNING);
60557+ remove_wait_queue(&learn_wait, &wait);
60558+ return retval;
60559+}
60560+
60561+static unsigned int
60562+poll_learn(struct file * file, poll_table * wait)
60563+{
60564+ poll_wait(file, &learn_wait, wait);
60565+
60566+ if (learn_buffer_len)
60567+ return (POLLIN | POLLRDNORM);
60568+
60569+ return 0;
60570+}
60571+
60572+void
60573+gr_clear_learn_entries(void)
60574+{
60575+ char *tmp;
60576+
60577+ mutex_lock(&gr_learn_user_mutex);
60578+ spin_lock(&gr_learn_lock);
60579+ tmp = learn_buffer;
60580+ learn_buffer = NULL;
60581+ spin_unlock(&gr_learn_lock);
60582+ if (tmp)
60583+ vfree(tmp);
60584+ if (learn_buffer_user != NULL) {
60585+ vfree(learn_buffer_user);
60586+ learn_buffer_user = NULL;
60587+ }
60588+ learn_buffer_len = 0;
60589+ mutex_unlock(&gr_learn_user_mutex);
60590+
60591+ return;
60592+}
60593+
60594+void
60595+gr_add_learn_entry(const char *fmt, ...)
60596+{
60597+ va_list args;
60598+ unsigned int len;
60599+
60600+ if (!gr_learn_attached)
60601+ return;
60602+
60603+ spin_lock(&gr_learn_lock);
60604+
60605+ /* leave a gap at the end so we know when it's "full" but don't have to
60606+ compute the exact length of the string we're trying to append
60607+ */
60608+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
60609+ spin_unlock(&gr_learn_lock);
60610+ wake_up_interruptible(&learn_wait);
60611+ return;
60612+ }
60613+ if (learn_buffer == NULL) {
60614+ spin_unlock(&gr_learn_lock);
60615+ return;
60616+ }
60617+
60618+ va_start(args, fmt);
60619+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
60620+ va_end(args);
60621+
60622+ learn_buffer_len += len + 1;
60623+
60624+ spin_unlock(&gr_learn_lock);
60625+ wake_up_interruptible(&learn_wait);
60626+
60627+ return;
60628+}
60629+
60630+static int
60631+open_learn(struct inode *inode, struct file *file)
60632+{
60633+ if (file->f_mode & FMODE_READ && gr_learn_attached)
60634+ return -EBUSY;
60635+ if (file->f_mode & FMODE_READ) {
60636+ int retval = 0;
60637+ mutex_lock(&gr_learn_user_mutex);
60638+ if (learn_buffer == NULL)
60639+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
60640+ if (learn_buffer_user == NULL)
60641+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
60642+ if (learn_buffer == NULL) {
60643+ retval = -ENOMEM;
60644+ goto out_error;
60645+ }
60646+ if (learn_buffer_user == NULL) {
60647+ retval = -ENOMEM;
60648+ goto out_error;
60649+ }
60650+ learn_buffer_len = 0;
60651+ learn_buffer_user_len = 0;
60652+ gr_learn_attached = 1;
60653+out_error:
60654+ mutex_unlock(&gr_learn_user_mutex);
60655+ return retval;
60656+ }
60657+ return 0;
60658+}
60659+
60660+static int
60661+close_learn(struct inode *inode, struct file *file)
60662+{
60663+ if (file->f_mode & FMODE_READ) {
60664+ char *tmp = NULL;
60665+ mutex_lock(&gr_learn_user_mutex);
60666+ spin_lock(&gr_learn_lock);
60667+ tmp = learn_buffer;
60668+ learn_buffer = NULL;
60669+ spin_unlock(&gr_learn_lock);
60670+ if (tmp)
60671+ vfree(tmp);
60672+ if (learn_buffer_user != NULL) {
60673+ vfree(learn_buffer_user);
60674+ learn_buffer_user = NULL;
60675+ }
60676+ learn_buffer_len = 0;
60677+ learn_buffer_user_len = 0;
60678+ gr_learn_attached = 0;
60679+ mutex_unlock(&gr_learn_user_mutex);
60680+ }
60681+
60682+ return 0;
60683+}
60684+
60685+const struct file_operations grsec_fops = {
60686+ .read = read_learn,
60687+ .write = write_grsec_handler,
60688+ .open = open_learn,
60689+ .release = close_learn,
60690+ .poll = poll_learn,
60691+};
60692diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
60693new file mode 100644
60694index 0000000..70b2179
60695--- /dev/null
60696+++ b/grsecurity/gracl_res.c
60697@@ -0,0 +1,67 @@
60698+#include <linux/kernel.h>
60699+#include <linux/sched.h>
60700+#include <linux/gracl.h>
60701+#include <linux/grinternal.h>
60702+
60703+static const char *restab_log[] = {
60704+ [RLIMIT_CPU] = "RLIMIT_CPU",
60705+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
60706+ [RLIMIT_DATA] = "RLIMIT_DATA",
60707+ [RLIMIT_STACK] = "RLIMIT_STACK",
60708+ [RLIMIT_CORE] = "RLIMIT_CORE",
60709+ [RLIMIT_RSS] = "RLIMIT_RSS",
60710+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
60711+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
60712+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
60713+ [RLIMIT_AS] = "RLIMIT_AS",
60714+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
60715+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
60716+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
60717+ [RLIMIT_NICE] = "RLIMIT_NICE",
60718+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
60719+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
60720+ [GR_CRASH_RES] = "RLIMIT_CRASH"
60721+};
60722+
60723+void
60724+gr_log_resource(const struct task_struct *task,
60725+ const int res, const unsigned long wanted, const int gt)
60726+{
60727+ const struct cred *cred;
60728+ unsigned long rlim;
60729+
60730+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
60731+ return;
60732+
60733+ // not yet supported resource
60734+ if (unlikely(!restab_log[res]))
60735+ return;
60736+
60737+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
60738+ rlim = task->signal->rlim[res].rlim_max;
60739+ else
60740+ rlim = task->signal->rlim[res].rlim_cur;
60741+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
60742+ return;
60743+
60744+ rcu_read_lock();
60745+ cred = __task_cred(task);
60746+
60747+ if (res == RLIMIT_NPROC &&
60748+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
60749+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
60750+ goto out_rcu_unlock;
60751+ else if (res == RLIMIT_MEMLOCK &&
60752+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
60753+ goto out_rcu_unlock;
60754+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
60755+ goto out_rcu_unlock;
60756+ rcu_read_unlock();
60757+
60758+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
60759+
60760+ return;
60761+out_rcu_unlock:
60762+ rcu_read_unlock();
60763+ return;
60764+}
60765diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
60766new file mode 100644
60767index 0000000..1d1b734
60768--- /dev/null
60769+++ b/grsecurity/gracl_segv.c
60770@@ -0,0 +1,284 @@
60771+#include <linux/kernel.h>
60772+#include <linux/mm.h>
60773+#include <asm/uaccess.h>
60774+#include <asm/errno.h>
60775+#include <asm/mman.h>
60776+#include <net/sock.h>
60777+#include <linux/file.h>
60778+#include <linux/fs.h>
60779+#include <linux/net.h>
60780+#include <linux/in.h>
60781+#include <linux/smp_lock.h>
60782+#include <linux/slab.h>
60783+#include <linux/types.h>
60784+#include <linux/sched.h>
60785+#include <linux/timer.h>
60786+#include <linux/gracl.h>
60787+#include <linux/grsecurity.h>
60788+#include <linux/grinternal.h>
60789+
60790+static struct crash_uid *uid_set;
60791+static unsigned short uid_used;
60792+static DEFINE_SPINLOCK(gr_uid_lock);
60793+extern rwlock_t gr_inode_lock;
60794+extern struct acl_subject_label *
60795+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
60796+ struct acl_role_label *role);
60797+extern int gr_fake_force_sig(int sig, struct task_struct *t);
60798+
60799+int
60800+gr_init_uidset(void)
60801+{
60802+ uid_set =
60803+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
60804+ uid_used = 0;
60805+
60806+ return uid_set ? 1 : 0;
60807+}
60808+
60809+void
60810+gr_free_uidset(void)
60811+{
60812+ if (uid_set)
60813+ kfree(uid_set);
60814+
60815+ return;
60816+}
60817+
60818+int
60819+gr_find_uid(const uid_t uid)
60820+{
60821+ struct crash_uid *tmp = uid_set;
60822+ uid_t buid;
60823+ int low = 0, high = uid_used - 1, mid;
60824+
60825+ while (high >= low) {
60826+ mid = (low + high) >> 1;
60827+ buid = tmp[mid].uid;
60828+ if (buid == uid)
60829+ return mid;
60830+ if (buid > uid)
60831+ high = mid - 1;
60832+ if (buid < uid)
60833+ low = mid + 1;
60834+ }
60835+
60836+ return -1;
60837+}
60838+
60839+static __inline__ void
60840+gr_insertsort(void)
60841+{
60842+ unsigned short i, j;
60843+ struct crash_uid index;
60844+
60845+ for (i = 1; i < uid_used; i++) {
60846+ index = uid_set[i];
60847+ j = i;
60848+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
60849+ uid_set[j] = uid_set[j - 1];
60850+ j--;
60851+ }
60852+ uid_set[j] = index;
60853+ }
60854+
60855+ return;
60856+}
60857+
60858+static __inline__ void
60859+gr_insert_uid(const uid_t uid, const unsigned long expires)
60860+{
60861+ int loc;
60862+
60863+ if (uid_used == GR_UIDTABLE_MAX)
60864+ return;
60865+
60866+ loc = gr_find_uid(uid);
60867+
60868+ if (loc >= 0) {
60869+ uid_set[loc].expires = expires;
60870+ return;
60871+ }
60872+
60873+ uid_set[uid_used].uid = uid;
60874+ uid_set[uid_used].expires = expires;
60875+ uid_used++;
60876+
60877+ gr_insertsort();
60878+
60879+ return;
60880+}
60881+
60882+void
60883+gr_remove_uid(const unsigned short loc)
60884+{
60885+ unsigned short i;
60886+
60887+ for (i = loc + 1; i < uid_used; i++)
60888+ uid_set[i - 1] = uid_set[i];
60889+
60890+ uid_used--;
60891+
60892+ return;
60893+}
60894+
60895+int
60896+gr_check_crash_uid(const uid_t uid)
60897+{
60898+ int loc;
60899+ int ret = 0;
60900+
60901+ if (unlikely(!gr_acl_is_enabled()))
60902+ return 0;
60903+
60904+ spin_lock(&gr_uid_lock);
60905+ loc = gr_find_uid(uid);
60906+
60907+ if (loc < 0)
60908+ goto out_unlock;
60909+
60910+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
60911+ gr_remove_uid(loc);
60912+ else
60913+ ret = 1;
60914+
60915+out_unlock:
60916+ spin_unlock(&gr_uid_lock);
60917+ return ret;
60918+}
60919+
60920+static __inline__ int
60921+proc_is_setxid(const struct cred *cred)
60922+{
60923+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
60924+ cred->uid != cred->fsuid)
60925+ return 1;
60926+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
60927+ cred->gid != cred->fsgid)
60928+ return 1;
60929+
60930+ return 0;
60931+}
60932+
60933+void
60934+gr_handle_crash(struct task_struct *task, const int sig)
60935+{
60936+ struct acl_subject_label *curr;
60937+ struct task_struct *tsk, *tsk2;
60938+ const struct cred *cred;
60939+ const struct cred *cred2;
60940+
60941+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
60942+ return;
60943+
60944+ if (unlikely(!gr_acl_is_enabled()))
60945+ return;
60946+
60947+ curr = task->acl;
60948+
60949+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
60950+ return;
60951+
60952+ if (time_before_eq(curr->expires, get_seconds())) {
60953+ curr->expires = 0;
60954+ curr->crashes = 0;
60955+ }
60956+
60957+ curr->crashes++;
60958+
60959+ if (!curr->expires)
60960+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
60961+
60962+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
60963+ time_after(curr->expires, get_seconds())) {
60964+ rcu_read_lock();
60965+ cred = __task_cred(task);
60966+ if (cred->uid && proc_is_setxid(cred)) {
60967+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
60968+ spin_lock(&gr_uid_lock);
60969+ gr_insert_uid(cred->uid, curr->expires);
60970+ spin_unlock(&gr_uid_lock);
60971+ curr->expires = 0;
60972+ curr->crashes = 0;
60973+ read_lock(&tasklist_lock);
60974+ do_each_thread(tsk2, tsk) {
60975+ cred2 = __task_cred(tsk);
60976+ if (tsk != task && cred2->uid == cred->uid)
60977+ gr_fake_force_sig(SIGKILL, tsk);
60978+ } while_each_thread(tsk2, tsk);
60979+ read_unlock(&tasklist_lock);
60980+ } else {
60981+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
60982+ read_lock(&tasklist_lock);
60983+ read_lock(&grsec_exec_file_lock);
60984+ do_each_thread(tsk2, tsk) {
60985+ if (likely(tsk != task)) {
60986+ // if this thread has the same subject as the one that triggered
60987+ // RES_CRASH and it's the same binary, kill it
60988+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
60989+ gr_fake_force_sig(SIGKILL, tsk);
60990+ }
60991+ } while_each_thread(tsk2, tsk);
60992+ read_unlock(&grsec_exec_file_lock);
60993+ read_unlock(&tasklist_lock);
60994+ }
60995+ rcu_read_unlock();
60996+ }
60997+
60998+ return;
60999+}
61000+
61001+int
61002+gr_check_crash_exec(const struct file *filp)
61003+{
61004+ struct acl_subject_label *curr;
61005+
61006+ if (unlikely(!gr_acl_is_enabled()))
61007+ return 0;
61008+
61009+ read_lock(&gr_inode_lock);
61010+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61011+ filp->f_path.dentry->d_inode->i_sb->s_dev,
61012+ current->role);
61013+ read_unlock(&gr_inode_lock);
61014+
61015+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61016+ (!curr->crashes && !curr->expires))
61017+ return 0;
61018+
61019+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61020+ time_after(curr->expires, get_seconds()))
61021+ return 1;
61022+ else if (time_before_eq(curr->expires, get_seconds())) {
61023+ curr->crashes = 0;
61024+ curr->expires = 0;
61025+ }
61026+
61027+ return 0;
61028+}
61029+
61030+void
61031+gr_handle_alertkill(struct task_struct *task)
61032+{
61033+ struct acl_subject_label *curracl;
61034+ __u32 curr_ip;
61035+ struct task_struct *p, *p2;
61036+
61037+ if (unlikely(!gr_acl_is_enabled()))
61038+ return;
61039+
61040+ curracl = task->acl;
61041+ curr_ip = task->signal->curr_ip;
61042+
61043+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61044+ read_lock(&tasklist_lock);
61045+ do_each_thread(p2, p) {
61046+ if (p->signal->curr_ip == curr_ip)
61047+ gr_fake_force_sig(SIGKILL, p);
61048+ } while_each_thread(p2, p);
61049+ read_unlock(&tasklist_lock);
61050+ } else if (curracl->mode & GR_KILLPROC)
61051+ gr_fake_force_sig(SIGKILL, task);
61052+
61053+ return;
61054+}
61055diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61056new file mode 100644
61057index 0000000..9d83a69
61058--- /dev/null
61059+++ b/grsecurity/gracl_shm.c
61060@@ -0,0 +1,40 @@
61061+#include <linux/kernel.h>
61062+#include <linux/mm.h>
61063+#include <linux/sched.h>
61064+#include <linux/file.h>
61065+#include <linux/ipc.h>
61066+#include <linux/gracl.h>
61067+#include <linux/grsecurity.h>
61068+#include <linux/grinternal.h>
61069+
61070+int
61071+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61072+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61073+{
61074+ struct task_struct *task;
61075+
61076+ if (!gr_acl_is_enabled())
61077+ return 1;
61078+
61079+ rcu_read_lock();
61080+ read_lock(&tasklist_lock);
61081+
61082+ task = find_task_by_vpid(shm_cprid);
61083+
61084+ if (unlikely(!task))
61085+ task = find_task_by_vpid(shm_lapid);
61086+
61087+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61088+ (task->pid == shm_lapid)) &&
61089+ (task->acl->mode & GR_PROTSHM) &&
61090+ (task->acl != current->acl))) {
61091+ read_unlock(&tasklist_lock);
61092+ rcu_read_unlock();
61093+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61094+ return 0;
61095+ }
61096+ read_unlock(&tasklist_lock);
61097+ rcu_read_unlock();
61098+
61099+ return 1;
61100+}
61101diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61102new file mode 100644
61103index 0000000..bc0be01
61104--- /dev/null
61105+++ b/grsecurity/grsec_chdir.c
61106@@ -0,0 +1,19 @@
61107+#include <linux/kernel.h>
61108+#include <linux/sched.h>
61109+#include <linux/fs.h>
61110+#include <linux/file.h>
61111+#include <linux/grsecurity.h>
61112+#include <linux/grinternal.h>
61113+
61114+void
61115+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61116+{
61117+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61118+ if ((grsec_enable_chdir && grsec_enable_group &&
61119+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61120+ !grsec_enable_group)) {
61121+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61122+ }
61123+#endif
61124+ return;
61125+}
61126diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61127new file mode 100644
61128index 0000000..197bdd5
61129--- /dev/null
61130+++ b/grsecurity/grsec_chroot.c
61131@@ -0,0 +1,386 @@
61132+#include <linux/kernel.h>
61133+#include <linux/module.h>
61134+#include <linux/sched.h>
61135+#include <linux/file.h>
61136+#include <linux/fs.h>
61137+#include <linux/mount.h>
61138+#include <linux/types.h>
61139+#include <linux/pid_namespace.h>
61140+#include <linux/grsecurity.h>
61141+#include <linux/grinternal.h>
61142+
61143+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61144+{
61145+#ifdef CONFIG_GRKERNSEC
61146+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61147+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61148+ task->gr_is_chrooted = 1;
61149+ else
61150+ task->gr_is_chrooted = 0;
61151+
61152+ task->gr_chroot_dentry = path->dentry;
61153+#endif
61154+ return;
61155+}
61156+
61157+void gr_clear_chroot_entries(struct task_struct *task)
61158+{
61159+#ifdef CONFIG_GRKERNSEC
61160+ task->gr_is_chrooted = 0;
61161+ task->gr_chroot_dentry = NULL;
61162+#endif
61163+ return;
61164+}
61165+
61166+int
61167+gr_handle_chroot_unix(const pid_t pid)
61168+{
61169+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61170+ struct task_struct *p;
61171+
61172+ if (unlikely(!grsec_enable_chroot_unix))
61173+ return 1;
61174+
61175+ if (likely(!proc_is_chrooted(current)))
61176+ return 1;
61177+
61178+ rcu_read_lock();
61179+ read_lock(&tasklist_lock);
61180+
61181+ p = find_task_by_vpid_unrestricted(pid);
61182+ if (unlikely(p && !have_same_root(current, p))) {
61183+ read_unlock(&tasklist_lock);
61184+ rcu_read_unlock();
61185+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61186+ return 0;
61187+ }
61188+ read_unlock(&tasklist_lock);
61189+ rcu_read_unlock();
61190+#endif
61191+ return 1;
61192+}
61193+
61194+int
61195+gr_handle_chroot_nice(void)
61196+{
61197+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61198+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61199+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61200+ return -EPERM;
61201+ }
61202+#endif
61203+ return 0;
61204+}
61205+
61206+int
61207+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61208+{
61209+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61210+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61211+ && proc_is_chrooted(current)) {
61212+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61213+ return -EACCES;
61214+ }
61215+#endif
61216+ return 0;
61217+}
61218+
61219+int
61220+gr_handle_chroot_rawio(const struct inode *inode)
61221+{
61222+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61223+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61224+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
61225+ return 1;
61226+#endif
61227+ return 0;
61228+}
61229+
61230+int
61231+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
61232+{
61233+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61234+ struct task_struct *p;
61235+ int ret = 0;
61236+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
61237+ return ret;
61238+
61239+ read_lock(&tasklist_lock);
61240+ do_each_pid_task(pid, type, p) {
61241+ if (!have_same_root(current, p)) {
61242+ ret = 1;
61243+ goto out;
61244+ }
61245+ } while_each_pid_task(pid, type, p);
61246+out:
61247+ read_unlock(&tasklist_lock);
61248+ return ret;
61249+#endif
61250+ return 0;
61251+}
61252+
61253+int
61254+gr_pid_is_chrooted(struct task_struct *p)
61255+{
61256+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61257+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
61258+ return 0;
61259+
61260+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
61261+ !have_same_root(current, p)) {
61262+ return 1;
61263+ }
61264+#endif
61265+ return 0;
61266+}
61267+
61268+EXPORT_SYMBOL(gr_pid_is_chrooted);
61269+
61270+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
61271+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
61272+{
61273+ struct dentry *dentry = (struct dentry *)u_dentry;
61274+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
61275+ struct dentry *realroot;
61276+ struct vfsmount *realrootmnt;
61277+ struct dentry *currentroot;
61278+ struct vfsmount *currentmnt;
61279+ struct task_struct *reaper = &init_task;
61280+ int ret = 1;
61281+
61282+ read_lock(&reaper->fs->lock);
61283+ realrootmnt = mntget(reaper->fs->root.mnt);
61284+ realroot = dget(reaper->fs->root.dentry);
61285+ read_unlock(&reaper->fs->lock);
61286+
61287+ read_lock(&current->fs->lock);
61288+ currentmnt = mntget(current->fs->root.mnt);
61289+ currentroot = dget(current->fs->root.dentry);
61290+ read_unlock(&current->fs->lock);
61291+
61292+ spin_lock(&dcache_lock);
61293+ for (;;) {
61294+ if (unlikely((dentry == realroot && mnt == realrootmnt)
61295+ || (dentry == currentroot && mnt == currentmnt)))
61296+ break;
61297+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
61298+ if (mnt->mnt_parent == mnt)
61299+ break;
61300+ dentry = mnt->mnt_mountpoint;
61301+ mnt = mnt->mnt_parent;
61302+ continue;
61303+ }
61304+ dentry = dentry->d_parent;
61305+ }
61306+ spin_unlock(&dcache_lock);
61307+
61308+ dput(currentroot);
61309+ mntput(currentmnt);
61310+
61311+ /* access is outside of chroot */
61312+ if (dentry == realroot && mnt == realrootmnt)
61313+ ret = 0;
61314+
61315+ dput(realroot);
61316+ mntput(realrootmnt);
61317+ return ret;
61318+}
61319+#endif
61320+
61321+int
61322+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
61323+{
61324+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61325+ if (!grsec_enable_chroot_fchdir)
61326+ return 1;
61327+
61328+ if (!proc_is_chrooted(current))
61329+ return 1;
61330+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
61331+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
61332+ return 0;
61333+ }
61334+#endif
61335+ return 1;
61336+}
61337+
61338+int
61339+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61340+ const time_t shm_createtime)
61341+{
61342+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61343+ struct task_struct *p;
61344+ time_t starttime;
61345+
61346+ if (unlikely(!grsec_enable_chroot_shmat))
61347+ return 1;
61348+
61349+ if (likely(!proc_is_chrooted(current)))
61350+ return 1;
61351+
61352+ rcu_read_lock();
61353+ read_lock(&tasklist_lock);
61354+
61355+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
61356+ starttime = p->start_time.tv_sec;
61357+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
61358+ if (have_same_root(current, p)) {
61359+ goto allow;
61360+ } else {
61361+ read_unlock(&tasklist_lock);
61362+ rcu_read_unlock();
61363+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61364+ return 0;
61365+ }
61366+ }
61367+ /* creator exited, pid reuse, fall through to next check */
61368+ }
61369+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
61370+ if (unlikely(!have_same_root(current, p))) {
61371+ read_unlock(&tasklist_lock);
61372+ rcu_read_unlock();
61373+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61374+ return 0;
61375+ }
61376+ }
61377+
61378+allow:
61379+ read_unlock(&tasklist_lock);
61380+ rcu_read_unlock();
61381+#endif
61382+ return 1;
61383+}
61384+
61385+void
61386+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
61387+{
61388+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61389+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
61390+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
61391+#endif
61392+ return;
61393+}
61394+
61395+int
61396+gr_handle_chroot_mknod(const struct dentry *dentry,
61397+ const struct vfsmount *mnt, const int mode)
61398+{
61399+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61400+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
61401+ proc_is_chrooted(current)) {
61402+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
61403+ return -EPERM;
61404+ }
61405+#endif
61406+ return 0;
61407+}
61408+
61409+int
61410+gr_handle_chroot_mount(const struct dentry *dentry,
61411+ const struct vfsmount *mnt, const char *dev_name)
61412+{
61413+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61414+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
61415+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
61416+ return -EPERM;
61417+ }
61418+#endif
61419+ return 0;
61420+}
61421+
61422+int
61423+gr_handle_chroot_pivot(void)
61424+{
61425+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61426+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
61427+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
61428+ return -EPERM;
61429+ }
61430+#endif
61431+ return 0;
61432+}
61433+
61434+int
61435+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
61436+{
61437+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61438+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
61439+ !gr_is_outside_chroot(dentry, mnt)) {
61440+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
61441+ return -EPERM;
61442+ }
61443+#endif
61444+ return 0;
61445+}
61446+
61447+extern const char *captab_log[];
61448+extern int captab_log_entries;
61449+
61450+int
61451+gr_chroot_is_capable(const int cap)
61452+{
61453+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61454+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61455+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61456+ if (cap_raised(chroot_caps, cap)) {
61457+ const struct cred *creds = current_cred();
61458+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
61459+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
61460+ }
61461+ return 0;
61462+ }
61463+ }
61464+#endif
61465+ return 1;
61466+}
61467+
61468+int
61469+gr_chroot_is_capable_nolog(const int cap)
61470+{
61471+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61472+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61473+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61474+ if (cap_raised(chroot_caps, cap)) {
61475+ return 0;
61476+ }
61477+ }
61478+#endif
61479+ return 1;
61480+}
61481+
61482+int
61483+gr_handle_chroot_sysctl(const int op)
61484+{
61485+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61486+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
61487+ && (op & MAY_WRITE))
61488+ return -EACCES;
61489+#endif
61490+ return 0;
61491+}
61492+
61493+void
61494+gr_handle_chroot_chdir(struct path *path)
61495+{
61496+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61497+ if (grsec_enable_chroot_chdir)
61498+ set_fs_pwd(current->fs, path);
61499+#endif
61500+ return;
61501+}
61502+
61503+int
61504+gr_handle_chroot_chmod(const struct dentry *dentry,
61505+ const struct vfsmount *mnt, const int mode)
61506+{
61507+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61508+ /* allow chmod +s on directories, but not on files */
61509+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
61510+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
61511+ proc_is_chrooted(current)) {
61512+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
61513+ return -EPERM;
61514+ }
61515+#endif
61516+ return 0;
61517+}
61518diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
61519new file mode 100644
61520index 0000000..b81db5b
61521--- /dev/null
61522+++ b/grsecurity/grsec_disabled.c
61523@@ -0,0 +1,439 @@
61524+#include <linux/kernel.h>
61525+#include <linux/module.h>
61526+#include <linux/sched.h>
61527+#include <linux/file.h>
61528+#include <linux/fs.h>
61529+#include <linux/kdev_t.h>
61530+#include <linux/net.h>
61531+#include <linux/in.h>
61532+#include <linux/ip.h>
61533+#include <linux/skbuff.h>
61534+#include <linux/sysctl.h>
61535+
61536+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61537+void
61538+pax_set_initial_flags(struct linux_binprm *bprm)
61539+{
61540+ return;
61541+}
61542+#endif
61543+
61544+#ifdef CONFIG_SYSCTL
61545+__u32
61546+gr_handle_sysctl(const struct ctl_table * table, const int op)
61547+{
61548+ return 0;
61549+}
61550+#endif
61551+
61552+#ifdef CONFIG_TASKSTATS
61553+int gr_is_taskstats_denied(int pid)
61554+{
61555+ return 0;
61556+}
61557+#endif
61558+
61559+int
61560+gr_acl_is_enabled(void)
61561+{
61562+ return 0;
61563+}
61564+
61565+void
61566+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61567+{
61568+ return;
61569+}
61570+
61571+int
61572+gr_handle_rawio(const struct inode *inode)
61573+{
61574+ return 0;
61575+}
61576+
61577+void
61578+gr_acl_handle_psacct(struct task_struct *task, const long code)
61579+{
61580+ return;
61581+}
61582+
61583+int
61584+gr_handle_ptrace(struct task_struct *task, const long request)
61585+{
61586+ return 0;
61587+}
61588+
61589+int
61590+gr_handle_proc_ptrace(struct task_struct *task)
61591+{
61592+ return 0;
61593+}
61594+
61595+void
61596+gr_learn_resource(const struct task_struct *task,
61597+ const int res, const unsigned long wanted, const int gt)
61598+{
61599+ return;
61600+}
61601+
61602+int
61603+gr_set_acls(const int type)
61604+{
61605+ return 0;
61606+}
61607+
61608+int
61609+gr_check_hidden_task(const struct task_struct *tsk)
61610+{
61611+ return 0;
61612+}
61613+
61614+int
61615+gr_check_protected_task(const struct task_struct *task)
61616+{
61617+ return 0;
61618+}
61619+
61620+int
61621+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
61622+{
61623+ return 0;
61624+}
61625+
61626+void
61627+gr_copy_label(struct task_struct *tsk)
61628+{
61629+ return;
61630+}
61631+
61632+void
61633+gr_set_pax_flags(struct task_struct *task)
61634+{
61635+ return;
61636+}
61637+
61638+int
61639+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
61640+ const int unsafe_share)
61641+{
61642+ return 0;
61643+}
61644+
61645+void
61646+gr_handle_delete(const ino_t ino, const dev_t dev)
61647+{
61648+ return;
61649+}
61650+
61651+void
61652+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61653+{
61654+ return;
61655+}
61656+
61657+void
61658+gr_handle_crash(struct task_struct *task, const int sig)
61659+{
61660+ return;
61661+}
61662+
61663+int
61664+gr_check_crash_exec(const struct file *filp)
61665+{
61666+ return 0;
61667+}
61668+
61669+int
61670+gr_check_crash_uid(const uid_t uid)
61671+{
61672+ return 0;
61673+}
61674+
61675+void
61676+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61677+ struct dentry *old_dentry,
61678+ struct dentry *new_dentry,
61679+ struct vfsmount *mnt, const __u8 replace)
61680+{
61681+ return;
61682+}
61683+
61684+int
61685+gr_search_socket(const int family, const int type, const int protocol)
61686+{
61687+ return 1;
61688+}
61689+
61690+int
61691+gr_search_connectbind(const int mode, const struct socket *sock,
61692+ const struct sockaddr_in *addr)
61693+{
61694+ return 0;
61695+}
61696+
61697+void
61698+gr_handle_alertkill(struct task_struct *task)
61699+{
61700+ return;
61701+}
61702+
61703+__u32
61704+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
61705+{
61706+ return 1;
61707+}
61708+
61709+__u32
61710+gr_acl_handle_hidden_file(const struct dentry * dentry,
61711+ const struct vfsmount * mnt)
61712+{
61713+ return 1;
61714+}
61715+
61716+__u32
61717+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61718+ int acc_mode)
61719+{
61720+ return 1;
61721+}
61722+
61723+__u32
61724+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61725+{
61726+ return 1;
61727+}
61728+
61729+__u32
61730+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
61731+{
61732+ return 1;
61733+}
61734+
61735+int
61736+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
61737+ unsigned int *vm_flags)
61738+{
61739+ return 1;
61740+}
61741+
61742+__u32
61743+gr_acl_handle_truncate(const struct dentry * dentry,
61744+ const struct vfsmount * mnt)
61745+{
61746+ return 1;
61747+}
61748+
61749+__u32
61750+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
61751+{
61752+ return 1;
61753+}
61754+
61755+__u32
61756+gr_acl_handle_access(const struct dentry * dentry,
61757+ const struct vfsmount * mnt, const int fmode)
61758+{
61759+ return 1;
61760+}
61761+
61762+__u32
61763+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
61764+ mode_t mode)
61765+{
61766+ return 1;
61767+}
61768+
61769+__u32
61770+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
61771+ mode_t mode)
61772+{
61773+ return 1;
61774+}
61775+
61776+__u32
61777+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
61778+{
61779+ return 1;
61780+}
61781+
61782+__u32
61783+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
61784+{
61785+ return 1;
61786+}
61787+
61788+void
61789+grsecurity_init(void)
61790+{
61791+ return;
61792+}
61793+
61794+__u32
61795+gr_acl_handle_mknod(const struct dentry * new_dentry,
61796+ const struct dentry * parent_dentry,
61797+ const struct vfsmount * parent_mnt,
61798+ const int mode)
61799+{
61800+ return 1;
61801+}
61802+
61803+__u32
61804+gr_acl_handle_mkdir(const struct dentry * new_dentry,
61805+ const struct dentry * parent_dentry,
61806+ const struct vfsmount * parent_mnt)
61807+{
61808+ return 1;
61809+}
61810+
61811+__u32
61812+gr_acl_handle_symlink(const struct dentry * new_dentry,
61813+ const struct dentry * parent_dentry,
61814+ const struct vfsmount * parent_mnt, const char *from)
61815+{
61816+ return 1;
61817+}
61818+
61819+__u32
61820+gr_acl_handle_link(const struct dentry * new_dentry,
61821+ const struct dentry * parent_dentry,
61822+ const struct vfsmount * parent_mnt,
61823+ const struct dentry * old_dentry,
61824+ const struct vfsmount * old_mnt, const char *to)
61825+{
61826+ return 1;
61827+}
61828+
61829+int
61830+gr_acl_handle_rename(const struct dentry *new_dentry,
61831+ const struct dentry *parent_dentry,
61832+ const struct vfsmount *parent_mnt,
61833+ const struct dentry *old_dentry,
61834+ const struct inode *old_parent_inode,
61835+ const struct vfsmount *old_mnt, const char *newname)
61836+{
61837+ return 0;
61838+}
61839+
61840+int
61841+gr_acl_handle_filldir(const struct file *file, const char *name,
61842+ const int namelen, const ino_t ino)
61843+{
61844+ return 1;
61845+}
61846+
61847+int
61848+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61849+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61850+{
61851+ return 1;
61852+}
61853+
61854+int
61855+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
61856+{
61857+ return 0;
61858+}
61859+
61860+int
61861+gr_search_accept(const struct socket *sock)
61862+{
61863+ return 0;
61864+}
61865+
61866+int
61867+gr_search_listen(const struct socket *sock)
61868+{
61869+ return 0;
61870+}
61871+
61872+int
61873+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
61874+{
61875+ return 0;
61876+}
61877+
61878+__u32
61879+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
61880+{
61881+ return 1;
61882+}
61883+
61884+__u32
61885+gr_acl_handle_creat(const struct dentry * dentry,
61886+ const struct dentry * p_dentry,
61887+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61888+ const int imode)
61889+{
61890+ return 1;
61891+}
61892+
61893+void
61894+gr_acl_handle_exit(void)
61895+{
61896+ return;
61897+}
61898+
61899+int
61900+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61901+{
61902+ return 1;
61903+}
61904+
61905+void
61906+gr_set_role_label(const uid_t uid, const gid_t gid)
61907+{
61908+ return;
61909+}
61910+
61911+int
61912+gr_acl_handle_procpidmem(const struct task_struct *task)
61913+{
61914+ return 0;
61915+}
61916+
61917+int
61918+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
61919+{
61920+ return 0;
61921+}
61922+
61923+int
61924+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
61925+{
61926+ return 0;
61927+}
61928+
61929+void
61930+gr_set_kernel_label(struct task_struct *task)
61931+{
61932+ return;
61933+}
61934+
61935+int
61936+gr_check_user_change(int real, int effective, int fs)
61937+{
61938+ return 0;
61939+}
61940+
61941+int
61942+gr_check_group_change(int real, int effective, int fs)
61943+{
61944+ return 0;
61945+}
61946+
61947+int gr_acl_enable_at_secure(void)
61948+{
61949+ return 0;
61950+}
61951+
61952+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
61953+{
61954+ return dentry->d_inode->i_sb->s_dev;
61955+}
61956+
61957+EXPORT_SYMBOL(gr_learn_resource);
61958+EXPORT_SYMBOL(gr_set_kernel_label);
61959+#ifdef CONFIG_SECURITY
61960+EXPORT_SYMBOL(gr_check_user_change);
61961+EXPORT_SYMBOL(gr_check_group_change);
61962+#endif
61963diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
61964new file mode 100644
61965index 0000000..a96e155
61966--- /dev/null
61967+++ b/grsecurity/grsec_exec.c
61968@@ -0,0 +1,204 @@
61969+#include <linux/kernel.h>
61970+#include <linux/sched.h>
61971+#include <linux/file.h>
61972+#include <linux/binfmts.h>
61973+#include <linux/smp_lock.h>
61974+#include <linux/fs.h>
61975+#include <linux/types.h>
61976+#include <linux/grdefs.h>
61977+#include <linux/grinternal.h>
61978+#include <linux/capability.h>
61979+#include <linux/compat.h>
61980+#include <linux/module.h>
61981+
61982+#include <asm/uaccess.h>
61983+
61984+#ifdef CONFIG_GRKERNSEC_EXECLOG
61985+static char gr_exec_arg_buf[132];
61986+static DEFINE_MUTEX(gr_exec_arg_mutex);
61987+#endif
61988+
61989+void
61990+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
61991+{
61992+#ifdef CONFIG_GRKERNSEC_EXECLOG
61993+ char *grarg = gr_exec_arg_buf;
61994+ unsigned int i, x, execlen = 0;
61995+ char c;
61996+
61997+ if (!((grsec_enable_execlog && grsec_enable_group &&
61998+ in_group_p(grsec_audit_gid))
61999+ || (grsec_enable_execlog && !grsec_enable_group)))
62000+ return;
62001+
62002+ mutex_lock(&gr_exec_arg_mutex);
62003+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62004+
62005+ if (unlikely(argv == NULL))
62006+ goto log;
62007+
62008+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62009+ const char __user *p;
62010+ unsigned int len;
62011+
62012+ if (copy_from_user(&p, argv + i, sizeof(p)))
62013+ goto log;
62014+ if (!p)
62015+ goto log;
62016+ len = strnlen_user(p, 128 - execlen);
62017+ if (len > 128 - execlen)
62018+ len = 128 - execlen;
62019+ else if (len > 0)
62020+ len--;
62021+ if (copy_from_user(grarg + execlen, p, len))
62022+ goto log;
62023+
62024+ /* rewrite unprintable characters */
62025+ for (x = 0; x < len; x++) {
62026+ c = *(grarg + execlen + x);
62027+ if (c < 32 || c > 126)
62028+ *(grarg + execlen + x) = ' ';
62029+ }
62030+
62031+ execlen += len;
62032+ *(grarg + execlen) = ' ';
62033+ *(grarg + execlen + 1) = '\0';
62034+ execlen++;
62035+ }
62036+
62037+ log:
62038+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62039+ bprm->file->f_path.mnt, grarg);
62040+ mutex_unlock(&gr_exec_arg_mutex);
62041+#endif
62042+ return;
62043+}
62044+
62045+#ifdef CONFIG_COMPAT
62046+void
62047+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62048+{
62049+#ifdef CONFIG_GRKERNSEC_EXECLOG
62050+ char *grarg = gr_exec_arg_buf;
62051+ unsigned int i, x, execlen = 0;
62052+ char c;
62053+
62054+ if (!((grsec_enable_execlog && grsec_enable_group &&
62055+ in_group_p(grsec_audit_gid))
62056+ || (grsec_enable_execlog && !grsec_enable_group)))
62057+ return;
62058+
62059+ mutex_lock(&gr_exec_arg_mutex);
62060+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62061+
62062+ if (unlikely(argv == NULL))
62063+ goto log;
62064+
62065+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62066+ compat_uptr_t p;
62067+ unsigned int len;
62068+
62069+ if (get_user(p, argv + i))
62070+ goto log;
62071+ len = strnlen_user(compat_ptr(p), 128 - execlen);
62072+ if (len > 128 - execlen)
62073+ len = 128 - execlen;
62074+ else if (len > 0)
62075+ len--;
62076+ else
62077+ goto log;
62078+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62079+ goto log;
62080+
62081+ /* rewrite unprintable characters */
62082+ for (x = 0; x < len; x++) {
62083+ c = *(grarg + execlen + x);
62084+ if (c < 32 || c > 126)
62085+ *(grarg + execlen + x) = ' ';
62086+ }
62087+
62088+ execlen += len;
62089+ *(grarg + execlen) = ' ';
62090+ *(grarg + execlen + 1) = '\0';
62091+ execlen++;
62092+ }
62093+
62094+ log:
62095+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62096+ bprm->file->f_path.mnt, grarg);
62097+ mutex_unlock(&gr_exec_arg_mutex);
62098+#endif
62099+ return;
62100+}
62101+#endif
62102+
62103+#ifdef CONFIG_GRKERNSEC
62104+extern int gr_acl_is_capable(const int cap);
62105+extern int gr_acl_is_capable_nolog(const int cap);
62106+extern int gr_chroot_is_capable(const int cap);
62107+extern int gr_chroot_is_capable_nolog(const int cap);
62108+#endif
62109+
62110+const char *captab_log[] = {
62111+ "CAP_CHOWN",
62112+ "CAP_DAC_OVERRIDE",
62113+ "CAP_DAC_READ_SEARCH",
62114+ "CAP_FOWNER",
62115+ "CAP_FSETID",
62116+ "CAP_KILL",
62117+ "CAP_SETGID",
62118+ "CAP_SETUID",
62119+ "CAP_SETPCAP",
62120+ "CAP_LINUX_IMMUTABLE",
62121+ "CAP_NET_BIND_SERVICE",
62122+ "CAP_NET_BROADCAST",
62123+ "CAP_NET_ADMIN",
62124+ "CAP_NET_RAW",
62125+ "CAP_IPC_LOCK",
62126+ "CAP_IPC_OWNER",
62127+ "CAP_SYS_MODULE",
62128+ "CAP_SYS_RAWIO",
62129+ "CAP_SYS_CHROOT",
62130+ "CAP_SYS_PTRACE",
62131+ "CAP_SYS_PACCT",
62132+ "CAP_SYS_ADMIN",
62133+ "CAP_SYS_BOOT",
62134+ "CAP_SYS_NICE",
62135+ "CAP_SYS_RESOURCE",
62136+ "CAP_SYS_TIME",
62137+ "CAP_SYS_TTY_CONFIG",
62138+ "CAP_MKNOD",
62139+ "CAP_LEASE",
62140+ "CAP_AUDIT_WRITE",
62141+ "CAP_AUDIT_CONTROL",
62142+ "CAP_SETFCAP",
62143+ "CAP_MAC_OVERRIDE",
62144+ "CAP_MAC_ADMIN"
62145+};
62146+
62147+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62148+
62149+int gr_is_capable(const int cap)
62150+{
62151+#ifdef CONFIG_GRKERNSEC
62152+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62153+ return 1;
62154+ return 0;
62155+#else
62156+ return 1;
62157+#endif
62158+}
62159+
62160+int gr_is_capable_nolog(const int cap)
62161+{
62162+#ifdef CONFIG_GRKERNSEC
62163+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62164+ return 1;
62165+ return 0;
62166+#else
62167+ return 1;
62168+#endif
62169+}
62170+
62171+EXPORT_SYMBOL(gr_is_capable);
62172+EXPORT_SYMBOL(gr_is_capable_nolog);
62173diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62174new file mode 100644
62175index 0000000..d3ee748
62176--- /dev/null
62177+++ b/grsecurity/grsec_fifo.c
62178@@ -0,0 +1,24 @@
62179+#include <linux/kernel.h>
62180+#include <linux/sched.h>
62181+#include <linux/fs.h>
62182+#include <linux/file.h>
62183+#include <linux/grinternal.h>
62184+
62185+int
62186+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62187+ const struct dentry *dir, const int flag, const int acc_mode)
62188+{
62189+#ifdef CONFIG_GRKERNSEC_FIFO
62190+ const struct cred *cred = current_cred();
62191+
62192+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62193+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62194+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62195+ (cred->fsuid != dentry->d_inode->i_uid)) {
62196+ if (!inode_permission(dentry->d_inode, acc_mode))
62197+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62198+ return -EACCES;
62199+ }
62200+#endif
62201+ return 0;
62202+}
62203diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62204new file mode 100644
62205index 0000000..8ca18bf
62206--- /dev/null
62207+++ b/grsecurity/grsec_fork.c
62208@@ -0,0 +1,23 @@
62209+#include <linux/kernel.h>
62210+#include <linux/sched.h>
62211+#include <linux/grsecurity.h>
62212+#include <linux/grinternal.h>
62213+#include <linux/errno.h>
62214+
62215+void
62216+gr_log_forkfail(const int retval)
62217+{
62218+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62219+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62220+ switch (retval) {
62221+ case -EAGAIN:
62222+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62223+ break;
62224+ case -ENOMEM:
62225+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
62226+ break;
62227+ }
62228+ }
62229+#endif
62230+ return;
62231+}
62232diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
62233new file mode 100644
62234index 0000000..f813c26
62235--- /dev/null
62236+++ b/grsecurity/grsec_init.c
62237@@ -0,0 +1,270 @@
62238+#include <linux/kernel.h>
62239+#include <linux/sched.h>
62240+#include <linux/mm.h>
62241+#include <linux/smp_lock.h>
62242+#include <linux/gracl.h>
62243+#include <linux/slab.h>
62244+#include <linux/vmalloc.h>
62245+#include <linux/percpu.h>
62246+#include <linux/module.h>
62247+
62248+int grsec_enable_brute;
62249+int grsec_enable_link;
62250+int grsec_enable_dmesg;
62251+int grsec_enable_harden_ptrace;
62252+int grsec_enable_fifo;
62253+int grsec_enable_execlog;
62254+int grsec_enable_signal;
62255+int grsec_enable_forkfail;
62256+int grsec_enable_audit_ptrace;
62257+int grsec_enable_time;
62258+int grsec_enable_audit_textrel;
62259+int grsec_enable_group;
62260+int grsec_audit_gid;
62261+int grsec_enable_chdir;
62262+int grsec_enable_mount;
62263+int grsec_enable_rofs;
62264+int grsec_enable_chroot_findtask;
62265+int grsec_enable_chroot_mount;
62266+int grsec_enable_chroot_shmat;
62267+int grsec_enable_chroot_fchdir;
62268+int grsec_enable_chroot_double;
62269+int grsec_enable_chroot_pivot;
62270+int grsec_enable_chroot_chdir;
62271+int grsec_enable_chroot_chmod;
62272+int grsec_enable_chroot_mknod;
62273+int grsec_enable_chroot_nice;
62274+int grsec_enable_chroot_execlog;
62275+int grsec_enable_chroot_caps;
62276+int grsec_enable_chroot_sysctl;
62277+int grsec_enable_chroot_unix;
62278+int grsec_enable_tpe;
62279+int grsec_tpe_gid;
62280+int grsec_enable_blackhole;
62281+#ifdef CONFIG_IPV6_MODULE
62282+EXPORT_SYMBOL(grsec_enable_blackhole);
62283+#endif
62284+int grsec_lastack_retries;
62285+int grsec_enable_tpe_all;
62286+int grsec_enable_tpe_invert;
62287+int grsec_enable_socket_all;
62288+int grsec_socket_all_gid;
62289+int grsec_enable_socket_client;
62290+int grsec_socket_client_gid;
62291+int grsec_enable_socket_server;
62292+int grsec_socket_server_gid;
62293+int grsec_resource_logging;
62294+int grsec_disable_privio;
62295+int grsec_enable_log_rwxmaps;
62296+int grsec_lock;
62297+
62298+DEFINE_SPINLOCK(grsec_alert_lock);
62299+unsigned long grsec_alert_wtime = 0;
62300+unsigned long grsec_alert_fyet = 0;
62301+
62302+DEFINE_SPINLOCK(grsec_audit_lock);
62303+
62304+DEFINE_RWLOCK(grsec_exec_file_lock);
62305+
62306+char *gr_shared_page[4];
62307+
62308+char *gr_alert_log_fmt;
62309+char *gr_audit_log_fmt;
62310+char *gr_alert_log_buf;
62311+char *gr_audit_log_buf;
62312+
62313+extern struct gr_arg *gr_usermode;
62314+extern unsigned char *gr_system_salt;
62315+extern unsigned char *gr_system_sum;
62316+
62317+void __init
62318+grsecurity_init(void)
62319+{
62320+ int j;
62321+ /* create the per-cpu shared pages */
62322+
62323+#ifdef CONFIG_X86
62324+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
62325+#endif
62326+
62327+ for (j = 0; j < 4; j++) {
62328+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
62329+ if (gr_shared_page[j] == NULL) {
62330+ panic("Unable to allocate grsecurity shared page");
62331+ return;
62332+ }
62333+ }
62334+
62335+ /* allocate log buffers */
62336+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
62337+ if (!gr_alert_log_fmt) {
62338+ panic("Unable to allocate grsecurity alert log format buffer");
62339+ return;
62340+ }
62341+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
62342+ if (!gr_audit_log_fmt) {
62343+ panic("Unable to allocate grsecurity audit log format buffer");
62344+ return;
62345+ }
62346+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62347+ if (!gr_alert_log_buf) {
62348+ panic("Unable to allocate grsecurity alert log buffer");
62349+ return;
62350+ }
62351+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62352+ if (!gr_audit_log_buf) {
62353+ panic("Unable to allocate grsecurity audit log buffer");
62354+ return;
62355+ }
62356+
62357+ /* allocate memory for authentication structure */
62358+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
62359+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
62360+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
62361+
62362+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
62363+ panic("Unable to allocate grsecurity authentication structure");
62364+ return;
62365+ }
62366+
62367+
62368+#ifdef CONFIG_GRKERNSEC_IO
62369+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
62370+ grsec_disable_privio = 1;
62371+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62372+ grsec_disable_privio = 1;
62373+#else
62374+ grsec_disable_privio = 0;
62375+#endif
62376+#endif
62377+
62378+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62379+ /* for backward compatibility, tpe_invert always defaults to on if
62380+ enabled in the kernel
62381+ */
62382+ grsec_enable_tpe_invert = 1;
62383+#endif
62384+
62385+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62386+#ifndef CONFIG_GRKERNSEC_SYSCTL
62387+ grsec_lock = 1;
62388+#endif
62389+
62390+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
62391+ grsec_enable_audit_textrel = 1;
62392+#endif
62393+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62394+ grsec_enable_log_rwxmaps = 1;
62395+#endif
62396+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
62397+ grsec_enable_group = 1;
62398+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
62399+#endif
62400+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62401+ grsec_enable_chdir = 1;
62402+#endif
62403+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62404+ grsec_enable_harden_ptrace = 1;
62405+#endif
62406+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62407+ grsec_enable_mount = 1;
62408+#endif
62409+#ifdef CONFIG_GRKERNSEC_LINK
62410+ grsec_enable_link = 1;
62411+#endif
62412+#ifdef CONFIG_GRKERNSEC_BRUTE
62413+ grsec_enable_brute = 1;
62414+#endif
62415+#ifdef CONFIG_GRKERNSEC_DMESG
62416+ grsec_enable_dmesg = 1;
62417+#endif
62418+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
62419+ grsec_enable_blackhole = 1;
62420+ grsec_lastack_retries = 4;
62421+#endif
62422+#ifdef CONFIG_GRKERNSEC_FIFO
62423+ grsec_enable_fifo = 1;
62424+#endif
62425+#ifdef CONFIG_GRKERNSEC_EXECLOG
62426+ grsec_enable_execlog = 1;
62427+#endif
62428+#ifdef CONFIG_GRKERNSEC_SIGNAL
62429+ grsec_enable_signal = 1;
62430+#endif
62431+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62432+ grsec_enable_forkfail = 1;
62433+#endif
62434+#ifdef CONFIG_GRKERNSEC_TIME
62435+ grsec_enable_time = 1;
62436+#endif
62437+#ifdef CONFIG_GRKERNSEC_RESLOG
62438+ grsec_resource_logging = 1;
62439+#endif
62440+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62441+ grsec_enable_chroot_findtask = 1;
62442+#endif
62443+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62444+ grsec_enable_chroot_unix = 1;
62445+#endif
62446+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62447+ grsec_enable_chroot_mount = 1;
62448+#endif
62449+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62450+ grsec_enable_chroot_fchdir = 1;
62451+#endif
62452+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62453+ grsec_enable_chroot_shmat = 1;
62454+#endif
62455+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
62456+ grsec_enable_audit_ptrace = 1;
62457+#endif
62458+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62459+ grsec_enable_chroot_double = 1;
62460+#endif
62461+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62462+ grsec_enable_chroot_pivot = 1;
62463+#endif
62464+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62465+ grsec_enable_chroot_chdir = 1;
62466+#endif
62467+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62468+ grsec_enable_chroot_chmod = 1;
62469+#endif
62470+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62471+ grsec_enable_chroot_mknod = 1;
62472+#endif
62473+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62474+ grsec_enable_chroot_nice = 1;
62475+#endif
62476+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62477+ grsec_enable_chroot_execlog = 1;
62478+#endif
62479+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62480+ grsec_enable_chroot_caps = 1;
62481+#endif
62482+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62483+ grsec_enable_chroot_sysctl = 1;
62484+#endif
62485+#ifdef CONFIG_GRKERNSEC_TPE
62486+ grsec_enable_tpe = 1;
62487+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
62488+#ifdef CONFIG_GRKERNSEC_TPE_ALL
62489+ grsec_enable_tpe_all = 1;
62490+#endif
62491+#endif
62492+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
62493+ grsec_enable_socket_all = 1;
62494+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
62495+#endif
62496+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
62497+ grsec_enable_socket_client = 1;
62498+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
62499+#endif
62500+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
62501+ grsec_enable_socket_server = 1;
62502+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
62503+#endif
62504+#endif
62505+
62506+ return;
62507+}
62508diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
62509new file mode 100644
62510index 0000000..3efe141
62511--- /dev/null
62512+++ b/grsecurity/grsec_link.c
62513@@ -0,0 +1,43 @@
62514+#include <linux/kernel.h>
62515+#include <linux/sched.h>
62516+#include <linux/fs.h>
62517+#include <linux/file.h>
62518+#include <linux/grinternal.h>
62519+
62520+int
62521+gr_handle_follow_link(const struct inode *parent,
62522+ const struct inode *inode,
62523+ const struct dentry *dentry, const struct vfsmount *mnt)
62524+{
62525+#ifdef CONFIG_GRKERNSEC_LINK
62526+ const struct cred *cred = current_cred();
62527+
62528+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
62529+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
62530+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
62531+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
62532+ return -EACCES;
62533+ }
62534+#endif
62535+ return 0;
62536+}
62537+
62538+int
62539+gr_handle_hardlink(const struct dentry *dentry,
62540+ const struct vfsmount *mnt,
62541+ struct inode *inode, const int mode, const char *to)
62542+{
62543+#ifdef CONFIG_GRKERNSEC_LINK
62544+ const struct cred *cred = current_cred();
62545+
62546+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
62547+ (!S_ISREG(mode) || (mode & S_ISUID) ||
62548+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
62549+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
62550+ !capable(CAP_FOWNER) && cred->uid) {
62551+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
62552+ return -EPERM;
62553+ }
62554+#endif
62555+ return 0;
62556+}
62557diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
62558new file mode 100644
62559index 0000000..a45d2e9
62560--- /dev/null
62561+++ b/grsecurity/grsec_log.c
62562@@ -0,0 +1,322 @@
62563+#include <linux/kernel.h>
62564+#include <linux/sched.h>
62565+#include <linux/file.h>
62566+#include <linux/tty.h>
62567+#include <linux/fs.h>
62568+#include <linux/grinternal.h>
62569+
62570+#ifdef CONFIG_TREE_PREEMPT_RCU
62571+#define DISABLE_PREEMPT() preempt_disable()
62572+#define ENABLE_PREEMPT() preempt_enable()
62573+#else
62574+#define DISABLE_PREEMPT()
62575+#define ENABLE_PREEMPT()
62576+#endif
62577+
62578+#define BEGIN_LOCKS(x) \
62579+ DISABLE_PREEMPT(); \
62580+ rcu_read_lock(); \
62581+ read_lock(&tasklist_lock); \
62582+ read_lock(&grsec_exec_file_lock); \
62583+ if (x != GR_DO_AUDIT) \
62584+ spin_lock(&grsec_alert_lock); \
62585+ else \
62586+ spin_lock(&grsec_audit_lock)
62587+
62588+#define END_LOCKS(x) \
62589+ if (x != GR_DO_AUDIT) \
62590+ spin_unlock(&grsec_alert_lock); \
62591+ else \
62592+ spin_unlock(&grsec_audit_lock); \
62593+ read_unlock(&grsec_exec_file_lock); \
62594+ read_unlock(&tasklist_lock); \
62595+ rcu_read_unlock(); \
62596+ ENABLE_PREEMPT(); \
62597+ if (x == GR_DONT_AUDIT) \
62598+ gr_handle_alertkill(current)
62599+
62600+enum {
62601+ FLOODING,
62602+ NO_FLOODING
62603+};
62604+
62605+extern char *gr_alert_log_fmt;
62606+extern char *gr_audit_log_fmt;
62607+extern char *gr_alert_log_buf;
62608+extern char *gr_audit_log_buf;
62609+
62610+static int gr_log_start(int audit)
62611+{
62612+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
62613+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
62614+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62615+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
62616+ unsigned long curr_secs = get_seconds();
62617+
62618+ if (audit == GR_DO_AUDIT)
62619+ goto set_fmt;
62620+
62621+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
62622+ grsec_alert_wtime = curr_secs;
62623+ grsec_alert_fyet = 0;
62624+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
62625+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
62626+ grsec_alert_fyet++;
62627+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
62628+ grsec_alert_wtime = curr_secs;
62629+ grsec_alert_fyet++;
62630+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
62631+ return FLOODING;
62632+ }
62633+ else return FLOODING;
62634+
62635+set_fmt:
62636+#endif
62637+ memset(buf, 0, PAGE_SIZE);
62638+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
62639+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
62640+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62641+ } else if (current->signal->curr_ip) {
62642+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
62643+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
62644+ } else if (gr_acl_is_enabled()) {
62645+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
62646+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62647+ } else {
62648+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
62649+ strcpy(buf, fmt);
62650+ }
62651+
62652+ return NO_FLOODING;
62653+}
62654+
62655+static void gr_log_middle(int audit, const char *msg, va_list ap)
62656+ __attribute__ ((format (printf, 2, 0)));
62657+
62658+static void gr_log_middle(int audit, const char *msg, va_list ap)
62659+{
62660+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62661+ unsigned int len = strlen(buf);
62662+
62663+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62664+
62665+ return;
62666+}
62667+
62668+static void gr_log_middle_varargs(int audit, const char *msg, ...)
62669+ __attribute__ ((format (printf, 2, 3)));
62670+
62671+static void gr_log_middle_varargs(int audit, const char *msg, ...)
62672+{
62673+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62674+ unsigned int len = strlen(buf);
62675+ va_list ap;
62676+
62677+ va_start(ap, msg);
62678+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62679+ va_end(ap);
62680+
62681+ return;
62682+}
62683+
62684+static void gr_log_end(int audit, int append_default)
62685+{
62686+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62687+
62688+ if (append_default) {
62689+ unsigned int len = strlen(buf);
62690+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
62691+ }
62692+
62693+ printk("%s\n", buf);
62694+
62695+ return;
62696+}
62697+
62698+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
62699+{
62700+ int logtype;
62701+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
62702+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
62703+ void *voidptr = NULL;
62704+ int num1 = 0, num2 = 0;
62705+ unsigned long ulong1 = 0, ulong2 = 0;
62706+ struct dentry *dentry = NULL;
62707+ struct vfsmount *mnt = NULL;
62708+ struct file *file = NULL;
62709+ struct task_struct *task = NULL;
62710+ const struct cred *cred, *pcred;
62711+ va_list ap;
62712+
62713+ BEGIN_LOCKS(audit);
62714+ logtype = gr_log_start(audit);
62715+ if (logtype == FLOODING) {
62716+ END_LOCKS(audit);
62717+ return;
62718+ }
62719+ va_start(ap, argtypes);
62720+ switch (argtypes) {
62721+ case GR_TTYSNIFF:
62722+ task = va_arg(ap, struct task_struct *);
62723+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
62724+ break;
62725+ case GR_SYSCTL_HIDDEN:
62726+ str1 = va_arg(ap, char *);
62727+ gr_log_middle_varargs(audit, msg, result, str1);
62728+ break;
62729+ case GR_RBAC:
62730+ dentry = va_arg(ap, struct dentry *);
62731+ mnt = va_arg(ap, struct vfsmount *);
62732+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
62733+ break;
62734+ case GR_RBAC_STR:
62735+ dentry = va_arg(ap, struct dentry *);
62736+ mnt = va_arg(ap, struct vfsmount *);
62737+ str1 = va_arg(ap, char *);
62738+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
62739+ break;
62740+ case GR_STR_RBAC:
62741+ str1 = va_arg(ap, char *);
62742+ dentry = va_arg(ap, struct dentry *);
62743+ mnt = va_arg(ap, struct vfsmount *);
62744+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
62745+ break;
62746+ case GR_RBAC_MODE2:
62747+ dentry = va_arg(ap, struct dentry *);
62748+ mnt = va_arg(ap, struct vfsmount *);
62749+ str1 = va_arg(ap, char *);
62750+ str2 = va_arg(ap, char *);
62751+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
62752+ break;
62753+ case GR_RBAC_MODE3:
62754+ dentry = va_arg(ap, struct dentry *);
62755+ mnt = va_arg(ap, struct vfsmount *);
62756+ str1 = va_arg(ap, char *);
62757+ str2 = va_arg(ap, char *);
62758+ str3 = va_arg(ap, char *);
62759+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
62760+ break;
62761+ case GR_FILENAME:
62762+ dentry = va_arg(ap, struct dentry *);
62763+ mnt = va_arg(ap, struct vfsmount *);
62764+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
62765+ break;
62766+ case GR_STR_FILENAME:
62767+ str1 = va_arg(ap, char *);
62768+ dentry = va_arg(ap, struct dentry *);
62769+ mnt = va_arg(ap, struct vfsmount *);
62770+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
62771+ break;
62772+ case GR_FILENAME_STR:
62773+ dentry = va_arg(ap, struct dentry *);
62774+ mnt = va_arg(ap, struct vfsmount *);
62775+ str1 = va_arg(ap, char *);
62776+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
62777+ break;
62778+ case GR_FILENAME_TWO_INT:
62779+ dentry = va_arg(ap, struct dentry *);
62780+ mnt = va_arg(ap, struct vfsmount *);
62781+ num1 = va_arg(ap, int);
62782+ num2 = va_arg(ap, int);
62783+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
62784+ break;
62785+ case GR_FILENAME_TWO_INT_STR:
62786+ dentry = va_arg(ap, struct dentry *);
62787+ mnt = va_arg(ap, struct vfsmount *);
62788+ num1 = va_arg(ap, int);
62789+ num2 = va_arg(ap, int);
62790+ str1 = va_arg(ap, char *);
62791+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
62792+ break;
62793+ case GR_TEXTREL:
62794+ file = va_arg(ap, struct file *);
62795+ ulong1 = va_arg(ap, unsigned long);
62796+ ulong2 = va_arg(ap, unsigned long);
62797+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
62798+ break;
62799+ case GR_PTRACE:
62800+ task = va_arg(ap, struct task_struct *);
62801+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
62802+ break;
62803+ case GR_RESOURCE:
62804+ task = va_arg(ap, struct task_struct *);
62805+ cred = __task_cred(task);
62806+ pcred = __task_cred(task->real_parent);
62807+ ulong1 = va_arg(ap, unsigned long);
62808+ str1 = va_arg(ap, char *);
62809+ ulong2 = va_arg(ap, unsigned long);
62810+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62811+ break;
62812+ case GR_CAP:
62813+ task = va_arg(ap, struct task_struct *);
62814+ cred = __task_cred(task);
62815+ pcred = __task_cred(task->real_parent);
62816+ str1 = va_arg(ap, char *);
62817+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62818+ break;
62819+ case GR_SIG:
62820+ str1 = va_arg(ap, char *);
62821+ voidptr = va_arg(ap, void *);
62822+ gr_log_middle_varargs(audit, msg, str1, voidptr);
62823+ break;
62824+ case GR_SIG2:
62825+ task = va_arg(ap, struct task_struct *);
62826+ cred = __task_cred(task);
62827+ pcred = __task_cred(task->real_parent);
62828+ num1 = va_arg(ap, int);
62829+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62830+ break;
62831+ case GR_CRASH1:
62832+ task = va_arg(ap, struct task_struct *);
62833+ cred = __task_cred(task);
62834+ pcred = __task_cred(task->real_parent);
62835+ ulong1 = va_arg(ap, unsigned long);
62836+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
62837+ break;
62838+ case GR_CRASH2:
62839+ task = va_arg(ap, struct task_struct *);
62840+ cred = __task_cred(task);
62841+ pcred = __task_cred(task->real_parent);
62842+ ulong1 = va_arg(ap, unsigned long);
62843+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
62844+ break;
62845+ case GR_RWXMAP:
62846+ file = va_arg(ap, struct file *);
62847+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
62848+ break;
62849+ case GR_PSACCT:
62850+ {
62851+ unsigned int wday, cday;
62852+ __u8 whr, chr;
62853+ __u8 wmin, cmin;
62854+ __u8 wsec, csec;
62855+ char cur_tty[64] = { 0 };
62856+ char parent_tty[64] = { 0 };
62857+
62858+ task = va_arg(ap, struct task_struct *);
62859+ wday = va_arg(ap, unsigned int);
62860+ cday = va_arg(ap, unsigned int);
62861+ whr = va_arg(ap, int);
62862+ chr = va_arg(ap, int);
62863+ wmin = va_arg(ap, int);
62864+ cmin = va_arg(ap, int);
62865+ wsec = va_arg(ap, int);
62866+ csec = va_arg(ap, int);
62867+ ulong1 = va_arg(ap, unsigned long);
62868+ cred = __task_cred(task);
62869+ pcred = __task_cred(task->real_parent);
62870+
62871+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62872+ }
62873+ break;
62874+ default:
62875+ gr_log_middle(audit, msg, ap);
62876+ }
62877+ va_end(ap);
62878+ // these don't need DEFAULTSECARGS printed on the end
62879+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
62880+ gr_log_end(audit, 0);
62881+ else
62882+ gr_log_end(audit, 1);
62883+ END_LOCKS(audit);
62884+}
62885diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
62886new file mode 100644
62887index 0000000..6c0416b
62888--- /dev/null
62889+++ b/grsecurity/grsec_mem.c
62890@@ -0,0 +1,33 @@
62891+#include <linux/kernel.h>
62892+#include <linux/sched.h>
62893+#include <linux/mm.h>
62894+#include <linux/mman.h>
62895+#include <linux/grinternal.h>
62896+
62897+void
62898+gr_handle_ioperm(void)
62899+{
62900+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
62901+ return;
62902+}
62903+
62904+void
62905+gr_handle_iopl(void)
62906+{
62907+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
62908+ return;
62909+}
62910+
62911+void
62912+gr_handle_mem_readwrite(u64 from, u64 to)
62913+{
62914+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
62915+ return;
62916+}
62917+
62918+void
62919+gr_handle_vm86(void)
62920+{
62921+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
62922+ return;
62923+}
62924diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
62925new file mode 100644
62926index 0000000..2131422
62927--- /dev/null
62928+++ b/grsecurity/grsec_mount.c
62929@@ -0,0 +1,62 @@
62930+#include <linux/kernel.h>
62931+#include <linux/sched.h>
62932+#include <linux/mount.h>
62933+#include <linux/grsecurity.h>
62934+#include <linux/grinternal.h>
62935+
62936+void
62937+gr_log_remount(const char *devname, const int retval)
62938+{
62939+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62940+ if (grsec_enable_mount && (retval >= 0))
62941+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
62942+#endif
62943+ return;
62944+}
62945+
62946+void
62947+gr_log_unmount(const char *devname, const int retval)
62948+{
62949+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62950+ if (grsec_enable_mount && (retval >= 0))
62951+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
62952+#endif
62953+ return;
62954+}
62955+
62956+void
62957+gr_log_mount(const char *from, const char *to, const int retval)
62958+{
62959+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62960+ if (grsec_enable_mount && (retval >= 0))
62961+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
62962+#endif
62963+ return;
62964+}
62965+
62966+int
62967+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
62968+{
62969+#ifdef CONFIG_GRKERNSEC_ROFS
62970+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
62971+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
62972+ return -EPERM;
62973+ } else
62974+ return 0;
62975+#endif
62976+ return 0;
62977+}
62978+
62979+int
62980+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
62981+{
62982+#ifdef CONFIG_GRKERNSEC_ROFS
62983+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
62984+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
62985+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
62986+ return -EPERM;
62987+ } else
62988+ return 0;
62989+#endif
62990+ return 0;
62991+}
62992diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
62993new file mode 100644
62994index 0000000..a3b12a0
62995--- /dev/null
62996+++ b/grsecurity/grsec_pax.c
62997@@ -0,0 +1,36 @@
62998+#include <linux/kernel.h>
62999+#include <linux/sched.h>
63000+#include <linux/mm.h>
63001+#include <linux/file.h>
63002+#include <linux/grinternal.h>
63003+#include <linux/grsecurity.h>
63004+
63005+void
63006+gr_log_textrel(struct vm_area_struct * vma)
63007+{
63008+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63009+ if (grsec_enable_audit_textrel)
63010+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63011+#endif
63012+ return;
63013+}
63014+
63015+void
63016+gr_log_rwxmmap(struct file *file)
63017+{
63018+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63019+ if (grsec_enable_log_rwxmaps)
63020+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63021+#endif
63022+ return;
63023+}
63024+
63025+void
63026+gr_log_rwxmprotect(struct file *file)
63027+{
63028+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63029+ if (grsec_enable_log_rwxmaps)
63030+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63031+#endif
63032+ return;
63033+}
63034diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63035new file mode 100644
63036index 0000000..472c1d6
63037--- /dev/null
63038+++ b/grsecurity/grsec_ptrace.c
63039@@ -0,0 +1,14 @@
63040+#include <linux/kernel.h>
63041+#include <linux/sched.h>
63042+#include <linux/grinternal.h>
63043+#include <linux/grsecurity.h>
63044+
63045+void
63046+gr_audit_ptrace(struct task_struct *task)
63047+{
63048+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63049+ if (grsec_enable_audit_ptrace)
63050+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63051+#endif
63052+ return;
63053+}
63054diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63055new file mode 100644
63056index 0000000..dc73fe9
63057--- /dev/null
63058+++ b/grsecurity/grsec_sig.c
63059@@ -0,0 +1,205 @@
63060+#include <linux/kernel.h>
63061+#include <linux/sched.h>
63062+#include <linux/delay.h>
63063+#include <linux/grsecurity.h>
63064+#include <linux/grinternal.h>
63065+#include <linux/hardirq.h>
63066+
63067+char *signames[] = {
63068+ [SIGSEGV] = "Segmentation fault",
63069+ [SIGILL] = "Illegal instruction",
63070+ [SIGABRT] = "Abort",
63071+ [SIGBUS] = "Invalid alignment/Bus error"
63072+};
63073+
63074+void
63075+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63076+{
63077+#ifdef CONFIG_GRKERNSEC_SIGNAL
63078+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63079+ (sig == SIGABRT) || (sig == SIGBUS))) {
63080+ if (t->pid == current->pid) {
63081+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63082+ } else {
63083+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63084+ }
63085+ }
63086+#endif
63087+ return;
63088+}
63089+
63090+int
63091+gr_handle_signal(const struct task_struct *p, const int sig)
63092+{
63093+#ifdef CONFIG_GRKERNSEC
63094+ if (current->pid > 1 && gr_check_protected_task(p)) {
63095+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63096+ return -EPERM;
63097+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63098+ return -EPERM;
63099+ }
63100+#endif
63101+ return 0;
63102+}
63103+
63104+#ifdef CONFIG_GRKERNSEC
63105+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63106+
63107+int gr_fake_force_sig(int sig, struct task_struct *t)
63108+{
63109+ unsigned long int flags;
63110+ int ret, blocked, ignored;
63111+ struct k_sigaction *action;
63112+
63113+ spin_lock_irqsave(&t->sighand->siglock, flags);
63114+ action = &t->sighand->action[sig-1];
63115+ ignored = action->sa.sa_handler == SIG_IGN;
63116+ blocked = sigismember(&t->blocked, sig);
63117+ if (blocked || ignored) {
63118+ action->sa.sa_handler = SIG_DFL;
63119+ if (blocked) {
63120+ sigdelset(&t->blocked, sig);
63121+ recalc_sigpending_and_wake(t);
63122+ }
63123+ }
63124+ if (action->sa.sa_handler == SIG_DFL)
63125+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
63126+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63127+
63128+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
63129+
63130+ return ret;
63131+}
63132+#endif
63133+
63134+#ifdef CONFIG_GRKERNSEC_BRUTE
63135+#define GR_USER_BAN_TIME (15 * 60)
63136+
63137+static int __get_dumpable(unsigned long mm_flags)
63138+{
63139+ int ret;
63140+
63141+ ret = mm_flags & MMF_DUMPABLE_MASK;
63142+ return (ret >= 2) ? 2 : ret;
63143+}
63144+#endif
63145+
63146+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63147+{
63148+#ifdef CONFIG_GRKERNSEC_BRUTE
63149+ uid_t uid = 0;
63150+
63151+ if (!grsec_enable_brute)
63152+ return;
63153+
63154+ rcu_read_lock();
63155+ read_lock(&tasklist_lock);
63156+ read_lock(&grsec_exec_file_lock);
63157+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63158+ p->real_parent->brute = 1;
63159+ else {
63160+ const struct cred *cred = __task_cred(p), *cred2;
63161+ struct task_struct *tsk, *tsk2;
63162+
63163+ if (!__get_dumpable(mm_flags) && cred->uid) {
63164+ struct user_struct *user;
63165+
63166+ uid = cred->uid;
63167+
63168+ /* this is put upon execution past expiration */
63169+ user = find_user(uid);
63170+ if (user == NULL)
63171+ goto unlock;
63172+ user->banned = 1;
63173+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63174+ if (user->ban_expires == ~0UL)
63175+ user->ban_expires--;
63176+
63177+ do_each_thread(tsk2, tsk) {
63178+ cred2 = __task_cred(tsk);
63179+ if (tsk != p && cred2->uid == uid)
63180+ gr_fake_force_sig(SIGKILL, tsk);
63181+ } while_each_thread(tsk2, tsk);
63182+ }
63183+ }
63184+unlock:
63185+ read_unlock(&grsec_exec_file_lock);
63186+ read_unlock(&tasklist_lock);
63187+ rcu_read_unlock();
63188+
63189+ if (uid)
63190+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63191+#endif
63192+ return;
63193+}
63194+
63195+void gr_handle_brute_check(void)
63196+{
63197+#ifdef CONFIG_GRKERNSEC_BRUTE
63198+ if (current->brute)
63199+ msleep(30 * 1000);
63200+#endif
63201+ return;
63202+}
63203+
63204+void gr_handle_kernel_exploit(void)
63205+{
63206+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
63207+ const struct cred *cred;
63208+ struct task_struct *tsk, *tsk2;
63209+ struct user_struct *user;
63210+ uid_t uid;
63211+
63212+ if (in_irq() || in_serving_softirq() || in_nmi())
63213+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
63214+
63215+ uid = current_uid();
63216+
63217+ if (uid == 0)
63218+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
63219+ else {
63220+ /* kill all the processes of this user, hold a reference
63221+ to their creds struct, and prevent them from creating
63222+ another process until system reset
63223+ */
63224+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
63225+ /* we intentionally leak this ref */
63226+ user = get_uid(current->cred->user);
63227+ if (user) {
63228+ user->banned = 1;
63229+ user->ban_expires = ~0UL;
63230+ }
63231+
63232+ read_lock(&tasklist_lock);
63233+ do_each_thread(tsk2, tsk) {
63234+ cred = __task_cred(tsk);
63235+ if (cred->uid == uid)
63236+ gr_fake_force_sig(SIGKILL, tsk);
63237+ } while_each_thread(tsk2, tsk);
63238+ read_unlock(&tasklist_lock);
63239+ }
63240+#endif
63241+}
63242+
63243+int __gr_process_user_ban(struct user_struct *user)
63244+{
63245+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63246+ if (unlikely(user->banned)) {
63247+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
63248+ user->banned = 0;
63249+ user->ban_expires = 0;
63250+ free_uid(user);
63251+ } else
63252+ return -EPERM;
63253+ }
63254+#endif
63255+ return 0;
63256+}
63257+
63258+int gr_process_user_ban(void)
63259+{
63260+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63261+ return __gr_process_user_ban(current->cred->user);
63262+#endif
63263+ return 0;
63264+}
63265diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
63266new file mode 100644
63267index 0000000..7512ea9
63268--- /dev/null
63269+++ b/grsecurity/grsec_sock.c
63270@@ -0,0 +1,275 @@
63271+#include <linux/kernel.h>
63272+#include <linux/module.h>
63273+#include <linux/sched.h>
63274+#include <linux/file.h>
63275+#include <linux/net.h>
63276+#include <linux/in.h>
63277+#include <linux/ip.h>
63278+#include <net/sock.h>
63279+#include <net/inet_sock.h>
63280+#include <linux/grsecurity.h>
63281+#include <linux/grinternal.h>
63282+#include <linux/gracl.h>
63283+
63284+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
63285+EXPORT_SYMBOL(gr_cap_rtnetlink);
63286+
63287+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
63288+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
63289+
63290+EXPORT_SYMBOL(gr_search_udp_recvmsg);
63291+EXPORT_SYMBOL(gr_search_udp_sendmsg);
63292+
63293+#ifdef CONFIG_UNIX_MODULE
63294+EXPORT_SYMBOL(gr_acl_handle_unix);
63295+EXPORT_SYMBOL(gr_acl_handle_mknod);
63296+EXPORT_SYMBOL(gr_handle_chroot_unix);
63297+EXPORT_SYMBOL(gr_handle_create);
63298+#endif
63299+
63300+#ifdef CONFIG_GRKERNSEC
63301+#define gr_conn_table_size 32749
63302+struct conn_table_entry {
63303+ struct conn_table_entry *next;
63304+ struct signal_struct *sig;
63305+};
63306+
63307+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
63308+DEFINE_SPINLOCK(gr_conn_table_lock);
63309+
63310+extern const char * gr_socktype_to_name(unsigned char type);
63311+extern const char * gr_proto_to_name(unsigned char proto);
63312+extern const char * gr_sockfamily_to_name(unsigned char family);
63313+
63314+static __inline__ int
63315+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
63316+{
63317+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
63318+}
63319+
63320+static __inline__ int
63321+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
63322+ __u16 sport, __u16 dport)
63323+{
63324+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
63325+ sig->gr_sport == sport && sig->gr_dport == dport))
63326+ return 1;
63327+ else
63328+ return 0;
63329+}
63330+
63331+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
63332+{
63333+ struct conn_table_entry **match;
63334+ unsigned int index;
63335+
63336+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63337+ sig->gr_sport, sig->gr_dport,
63338+ gr_conn_table_size);
63339+
63340+ newent->sig = sig;
63341+
63342+ match = &gr_conn_table[index];
63343+ newent->next = *match;
63344+ *match = newent;
63345+
63346+ return;
63347+}
63348+
63349+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
63350+{
63351+ struct conn_table_entry *match, *last = NULL;
63352+ unsigned int index;
63353+
63354+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63355+ sig->gr_sport, sig->gr_dport,
63356+ gr_conn_table_size);
63357+
63358+ match = gr_conn_table[index];
63359+ while (match && !conn_match(match->sig,
63360+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
63361+ sig->gr_dport)) {
63362+ last = match;
63363+ match = match->next;
63364+ }
63365+
63366+ if (match) {
63367+ if (last)
63368+ last->next = match->next;
63369+ else
63370+ gr_conn_table[index] = NULL;
63371+ kfree(match);
63372+ }
63373+
63374+ return;
63375+}
63376+
63377+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
63378+ __u16 sport, __u16 dport)
63379+{
63380+ struct conn_table_entry *match;
63381+ unsigned int index;
63382+
63383+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
63384+
63385+ match = gr_conn_table[index];
63386+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
63387+ match = match->next;
63388+
63389+ if (match)
63390+ return match->sig;
63391+ else
63392+ return NULL;
63393+}
63394+
63395+#endif
63396+
63397+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
63398+{
63399+#ifdef CONFIG_GRKERNSEC
63400+ struct signal_struct *sig = task->signal;
63401+ struct conn_table_entry *newent;
63402+
63403+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
63404+ if (newent == NULL)
63405+ return;
63406+ /* no bh lock needed since we are called with bh disabled */
63407+ spin_lock(&gr_conn_table_lock);
63408+ gr_del_task_from_ip_table_nolock(sig);
63409+ sig->gr_saddr = inet->rcv_saddr;
63410+ sig->gr_daddr = inet->daddr;
63411+ sig->gr_sport = inet->sport;
63412+ sig->gr_dport = inet->dport;
63413+ gr_add_to_task_ip_table_nolock(sig, newent);
63414+ spin_unlock(&gr_conn_table_lock);
63415+#endif
63416+ return;
63417+}
63418+
63419+void gr_del_task_from_ip_table(struct task_struct *task)
63420+{
63421+#ifdef CONFIG_GRKERNSEC
63422+ spin_lock_bh(&gr_conn_table_lock);
63423+ gr_del_task_from_ip_table_nolock(task->signal);
63424+ spin_unlock_bh(&gr_conn_table_lock);
63425+#endif
63426+ return;
63427+}
63428+
63429+void
63430+gr_attach_curr_ip(const struct sock *sk)
63431+{
63432+#ifdef CONFIG_GRKERNSEC
63433+ struct signal_struct *p, *set;
63434+ const struct inet_sock *inet = inet_sk(sk);
63435+
63436+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
63437+ return;
63438+
63439+ set = current->signal;
63440+
63441+ spin_lock_bh(&gr_conn_table_lock);
63442+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
63443+ inet->dport, inet->sport);
63444+ if (unlikely(p != NULL)) {
63445+ set->curr_ip = p->curr_ip;
63446+ set->used_accept = 1;
63447+ gr_del_task_from_ip_table_nolock(p);
63448+ spin_unlock_bh(&gr_conn_table_lock);
63449+ return;
63450+ }
63451+ spin_unlock_bh(&gr_conn_table_lock);
63452+
63453+ set->curr_ip = inet->daddr;
63454+ set->used_accept = 1;
63455+#endif
63456+ return;
63457+}
63458+
63459+int
63460+gr_handle_sock_all(const int family, const int type, const int protocol)
63461+{
63462+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63463+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
63464+ (family != AF_UNIX)) {
63465+ if (family == AF_INET)
63466+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
63467+ else
63468+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
63469+ return -EACCES;
63470+ }
63471+#endif
63472+ return 0;
63473+}
63474+
63475+int
63476+gr_handle_sock_server(const struct sockaddr *sck)
63477+{
63478+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63479+ if (grsec_enable_socket_server &&
63480+ in_group_p(grsec_socket_server_gid) &&
63481+ sck && (sck->sa_family != AF_UNIX) &&
63482+ (sck->sa_family != AF_LOCAL)) {
63483+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63484+ return -EACCES;
63485+ }
63486+#endif
63487+ return 0;
63488+}
63489+
63490+int
63491+gr_handle_sock_server_other(const struct sock *sck)
63492+{
63493+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63494+ if (grsec_enable_socket_server &&
63495+ in_group_p(grsec_socket_server_gid) &&
63496+ sck && (sck->sk_family != AF_UNIX) &&
63497+ (sck->sk_family != AF_LOCAL)) {
63498+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63499+ return -EACCES;
63500+ }
63501+#endif
63502+ return 0;
63503+}
63504+
63505+int
63506+gr_handle_sock_client(const struct sockaddr *sck)
63507+{
63508+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63509+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
63510+ sck && (sck->sa_family != AF_UNIX) &&
63511+ (sck->sa_family != AF_LOCAL)) {
63512+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
63513+ return -EACCES;
63514+ }
63515+#endif
63516+ return 0;
63517+}
63518+
63519+kernel_cap_t
63520+gr_cap_rtnetlink(struct sock *sock)
63521+{
63522+#ifdef CONFIG_GRKERNSEC
63523+ if (!gr_acl_is_enabled())
63524+ return current_cap();
63525+ else if (sock->sk_protocol == NETLINK_ISCSI &&
63526+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
63527+ gr_is_capable(CAP_SYS_ADMIN))
63528+ return current_cap();
63529+ else if (sock->sk_protocol == NETLINK_AUDIT &&
63530+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
63531+ gr_is_capable(CAP_AUDIT_WRITE) &&
63532+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
63533+ gr_is_capable(CAP_AUDIT_CONTROL))
63534+ return current_cap();
63535+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
63536+ ((sock->sk_protocol == NETLINK_ROUTE) ?
63537+ gr_is_capable_nolog(CAP_NET_ADMIN) :
63538+ gr_is_capable(CAP_NET_ADMIN)))
63539+ return current_cap();
63540+ else
63541+ return __cap_empty_set;
63542+#else
63543+ return current_cap();
63544+#endif
63545+}
63546diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
63547new file mode 100644
63548index 0000000..2753505
63549--- /dev/null
63550+++ b/grsecurity/grsec_sysctl.c
63551@@ -0,0 +1,479 @@
63552+#include <linux/kernel.h>
63553+#include <linux/sched.h>
63554+#include <linux/sysctl.h>
63555+#include <linux/grsecurity.h>
63556+#include <linux/grinternal.h>
63557+
63558+int
63559+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
63560+{
63561+#ifdef CONFIG_GRKERNSEC_SYSCTL
63562+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
63563+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
63564+ return -EACCES;
63565+ }
63566+#endif
63567+ return 0;
63568+}
63569+
63570+#ifdef CONFIG_GRKERNSEC_ROFS
63571+static int __maybe_unused one = 1;
63572+#endif
63573+
63574+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
63575+ctl_table grsecurity_table[] = {
63576+#ifdef CONFIG_GRKERNSEC_SYSCTL
63577+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
63578+#ifdef CONFIG_GRKERNSEC_IO
63579+ {
63580+ .ctl_name = CTL_UNNUMBERED,
63581+ .procname = "disable_priv_io",
63582+ .data = &grsec_disable_privio,
63583+ .maxlen = sizeof(int),
63584+ .mode = 0600,
63585+ .proc_handler = &proc_dointvec,
63586+ },
63587+#endif
63588+#endif
63589+#ifdef CONFIG_GRKERNSEC_LINK
63590+ {
63591+ .ctl_name = CTL_UNNUMBERED,
63592+ .procname = "linking_restrictions",
63593+ .data = &grsec_enable_link,
63594+ .maxlen = sizeof(int),
63595+ .mode = 0600,
63596+ .proc_handler = &proc_dointvec,
63597+ },
63598+#endif
63599+#ifdef CONFIG_GRKERNSEC_BRUTE
63600+ {
63601+ .ctl_name = CTL_UNNUMBERED,
63602+ .procname = "deter_bruteforce",
63603+ .data = &grsec_enable_brute,
63604+ .maxlen = sizeof(int),
63605+ .mode = 0600,
63606+ .proc_handler = &proc_dointvec,
63607+ },
63608+#endif
63609+#ifdef CONFIG_GRKERNSEC_FIFO
63610+ {
63611+ .ctl_name = CTL_UNNUMBERED,
63612+ .procname = "fifo_restrictions",
63613+ .data = &grsec_enable_fifo,
63614+ .maxlen = sizeof(int),
63615+ .mode = 0600,
63616+ .proc_handler = &proc_dointvec,
63617+ },
63618+#endif
63619+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63620+ {
63621+ .ctl_name = CTL_UNNUMBERED,
63622+ .procname = "ip_blackhole",
63623+ .data = &grsec_enable_blackhole,
63624+ .maxlen = sizeof(int),
63625+ .mode = 0600,
63626+ .proc_handler = &proc_dointvec,
63627+ },
63628+ {
63629+ .ctl_name = CTL_UNNUMBERED,
63630+ .procname = "lastack_retries",
63631+ .data = &grsec_lastack_retries,
63632+ .maxlen = sizeof(int),
63633+ .mode = 0600,
63634+ .proc_handler = &proc_dointvec,
63635+ },
63636+#endif
63637+#ifdef CONFIG_GRKERNSEC_EXECLOG
63638+ {
63639+ .ctl_name = CTL_UNNUMBERED,
63640+ .procname = "exec_logging",
63641+ .data = &grsec_enable_execlog,
63642+ .maxlen = sizeof(int),
63643+ .mode = 0600,
63644+ .proc_handler = &proc_dointvec,
63645+ },
63646+#endif
63647+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63648+ {
63649+ .ctl_name = CTL_UNNUMBERED,
63650+ .procname = "rwxmap_logging",
63651+ .data = &grsec_enable_log_rwxmaps,
63652+ .maxlen = sizeof(int),
63653+ .mode = 0600,
63654+ .proc_handler = &proc_dointvec,
63655+ },
63656+#endif
63657+#ifdef CONFIG_GRKERNSEC_SIGNAL
63658+ {
63659+ .ctl_name = CTL_UNNUMBERED,
63660+ .procname = "signal_logging",
63661+ .data = &grsec_enable_signal,
63662+ .maxlen = sizeof(int),
63663+ .mode = 0600,
63664+ .proc_handler = &proc_dointvec,
63665+ },
63666+#endif
63667+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63668+ {
63669+ .ctl_name = CTL_UNNUMBERED,
63670+ .procname = "forkfail_logging",
63671+ .data = &grsec_enable_forkfail,
63672+ .maxlen = sizeof(int),
63673+ .mode = 0600,
63674+ .proc_handler = &proc_dointvec,
63675+ },
63676+#endif
63677+#ifdef CONFIG_GRKERNSEC_TIME
63678+ {
63679+ .ctl_name = CTL_UNNUMBERED,
63680+ .procname = "timechange_logging",
63681+ .data = &grsec_enable_time,
63682+ .maxlen = sizeof(int),
63683+ .mode = 0600,
63684+ .proc_handler = &proc_dointvec,
63685+ },
63686+#endif
63687+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63688+ {
63689+ .ctl_name = CTL_UNNUMBERED,
63690+ .procname = "chroot_deny_shmat",
63691+ .data = &grsec_enable_chroot_shmat,
63692+ .maxlen = sizeof(int),
63693+ .mode = 0600,
63694+ .proc_handler = &proc_dointvec,
63695+ },
63696+#endif
63697+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63698+ {
63699+ .ctl_name = CTL_UNNUMBERED,
63700+ .procname = "chroot_deny_unix",
63701+ .data = &grsec_enable_chroot_unix,
63702+ .maxlen = sizeof(int),
63703+ .mode = 0600,
63704+ .proc_handler = &proc_dointvec,
63705+ },
63706+#endif
63707+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63708+ {
63709+ .ctl_name = CTL_UNNUMBERED,
63710+ .procname = "chroot_deny_mount",
63711+ .data = &grsec_enable_chroot_mount,
63712+ .maxlen = sizeof(int),
63713+ .mode = 0600,
63714+ .proc_handler = &proc_dointvec,
63715+ },
63716+#endif
63717+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63718+ {
63719+ .ctl_name = CTL_UNNUMBERED,
63720+ .procname = "chroot_deny_fchdir",
63721+ .data = &grsec_enable_chroot_fchdir,
63722+ .maxlen = sizeof(int),
63723+ .mode = 0600,
63724+ .proc_handler = &proc_dointvec,
63725+ },
63726+#endif
63727+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63728+ {
63729+ .ctl_name = CTL_UNNUMBERED,
63730+ .procname = "chroot_deny_chroot",
63731+ .data = &grsec_enable_chroot_double,
63732+ .maxlen = sizeof(int),
63733+ .mode = 0600,
63734+ .proc_handler = &proc_dointvec,
63735+ },
63736+#endif
63737+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63738+ {
63739+ .ctl_name = CTL_UNNUMBERED,
63740+ .procname = "chroot_deny_pivot",
63741+ .data = &grsec_enable_chroot_pivot,
63742+ .maxlen = sizeof(int),
63743+ .mode = 0600,
63744+ .proc_handler = &proc_dointvec,
63745+ },
63746+#endif
63747+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63748+ {
63749+ .ctl_name = CTL_UNNUMBERED,
63750+ .procname = "chroot_enforce_chdir",
63751+ .data = &grsec_enable_chroot_chdir,
63752+ .maxlen = sizeof(int),
63753+ .mode = 0600,
63754+ .proc_handler = &proc_dointvec,
63755+ },
63756+#endif
63757+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63758+ {
63759+ .ctl_name = CTL_UNNUMBERED,
63760+ .procname = "chroot_deny_chmod",
63761+ .data = &grsec_enable_chroot_chmod,
63762+ .maxlen = sizeof(int),
63763+ .mode = 0600,
63764+ .proc_handler = &proc_dointvec,
63765+ },
63766+#endif
63767+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63768+ {
63769+ .ctl_name = CTL_UNNUMBERED,
63770+ .procname = "chroot_deny_mknod",
63771+ .data = &grsec_enable_chroot_mknod,
63772+ .maxlen = sizeof(int),
63773+ .mode = 0600,
63774+ .proc_handler = &proc_dointvec,
63775+ },
63776+#endif
63777+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63778+ {
63779+ .ctl_name = CTL_UNNUMBERED,
63780+ .procname = "chroot_restrict_nice",
63781+ .data = &grsec_enable_chroot_nice,
63782+ .maxlen = sizeof(int),
63783+ .mode = 0600,
63784+ .proc_handler = &proc_dointvec,
63785+ },
63786+#endif
63787+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63788+ {
63789+ .ctl_name = CTL_UNNUMBERED,
63790+ .procname = "chroot_execlog",
63791+ .data = &grsec_enable_chroot_execlog,
63792+ .maxlen = sizeof(int),
63793+ .mode = 0600,
63794+ .proc_handler = &proc_dointvec,
63795+ },
63796+#endif
63797+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63798+ {
63799+ .ctl_name = CTL_UNNUMBERED,
63800+ .procname = "chroot_caps",
63801+ .data = &grsec_enable_chroot_caps,
63802+ .maxlen = sizeof(int),
63803+ .mode = 0600,
63804+ .proc_handler = &proc_dointvec,
63805+ },
63806+#endif
63807+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63808+ {
63809+ .ctl_name = CTL_UNNUMBERED,
63810+ .procname = "chroot_deny_sysctl",
63811+ .data = &grsec_enable_chroot_sysctl,
63812+ .maxlen = sizeof(int),
63813+ .mode = 0600,
63814+ .proc_handler = &proc_dointvec,
63815+ },
63816+#endif
63817+#ifdef CONFIG_GRKERNSEC_TPE
63818+ {
63819+ .ctl_name = CTL_UNNUMBERED,
63820+ .procname = "tpe",
63821+ .data = &grsec_enable_tpe,
63822+ .maxlen = sizeof(int),
63823+ .mode = 0600,
63824+ .proc_handler = &proc_dointvec,
63825+ },
63826+ {
63827+ .ctl_name = CTL_UNNUMBERED,
63828+ .procname = "tpe_gid",
63829+ .data = &grsec_tpe_gid,
63830+ .maxlen = sizeof(int),
63831+ .mode = 0600,
63832+ .proc_handler = &proc_dointvec,
63833+ },
63834+#endif
63835+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63836+ {
63837+ .ctl_name = CTL_UNNUMBERED,
63838+ .procname = "tpe_invert",
63839+ .data = &grsec_enable_tpe_invert,
63840+ .maxlen = sizeof(int),
63841+ .mode = 0600,
63842+ .proc_handler = &proc_dointvec,
63843+ },
63844+#endif
63845+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63846+ {
63847+ .ctl_name = CTL_UNNUMBERED,
63848+ .procname = "tpe_restrict_all",
63849+ .data = &grsec_enable_tpe_all,
63850+ .maxlen = sizeof(int),
63851+ .mode = 0600,
63852+ .proc_handler = &proc_dointvec,
63853+ },
63854+#endif
63855+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63856+ {
63857+ .ctl_name = CTL_UNNUMBERED,
63858+ .procname = "socket_all",
63859+ .data = &grsec_enable_socket_all,
63860+ .maxlen = sizeof(int),
63861+ .mode = 0600,
63862+ .proc_handler = &proc_dointvec,
63863+ },
63864+ {
63865+ .ctl_name = CTL_UNNUMBERED,
63866+ .procname = "socket_all_gid",
63867+ .data = &grsec_socket_all_gid,
63868+ .maxlen = sizeof(int),
63869+ .mode = 0600,
63870+ .proc_handler = &proc_dointvec,
63871+ },
63872+#endif
63873+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63874+ {
63875+ .ctl_name = CTL_UNNUMBERED,
63876+ .procname = "socket_client",
63877+ .data = &grsec_enable_socket_client,
63878+ .maxlen = sizeof(int),
63879+ .mode = 0600,
63880+ .proc_handler = &proc_dointvec,
63881+ },
63882+ {
63883+ .ctl_name = CTL_UNNUMBERED,
63884+ .procname = "socket_client_gid",
63885+ .data = &grsec_socket_client_gid,
63886+ .maxlen = sizeof(int),
63887+ .mode = 0600,
63888+ .proc_handler = &proc_dointvec,
63889+ },
63890+#endif
63891+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63892+ {
63893+ .ctl_name = CTL_UNNUMBERED,
63894+ .procname = "socket_server",
63895+ .data = &grsec_enable_socket_server,
63896+ .maxlen = sizeof(int),
63897+ .mode = 0600,
63898+ .proc_handler = &proc_dointvec,
63899+ },
63900+ {
63901+ .ctl_name = CTL_UNNUMBERED,
63902+ .procname = "socket_server_gid",
63903+ .data = &grsec_socket_server_gid,
63904+ .maxlen = sizeof(int),
63905+ .mode = 0600,
63906+ .proc_handler = &proc_dointvec,
63907+ },
63908+#endif
63909+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63910+ {
63911+ .ctl_name = CTL_UNNUMBERED,
63912+ .procname = "audit_group",
63913+ .data = &grsec_enable_group,
63914+ .maxlen = sizeof(int),
63915+ .mode = 0600,
63916+ .proc_handler = &proc_dointvec,
63917+ },
63918+ {
63919+ .ctl_name = CTL_UNNUMBERED,
63920+ .procname = "audit_gid",
63921+ .data = &grsec_audit_gid,
63922+ .maxlen = sizeof(int),
63923+ .mode = 0600,
63924+ .proc_handler = &proc_dointvec,
63925+ },
63926+#endif
63927+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63928+ {
63929+ .ctl_name = CTL_UNNUMBERED,
63930+ .procname = "audit_chdir",
63931+ .data = &grsec_enable_chdir,
63932+ .maxlen = sizeof(int),
63933+ .mode = 0600,
63934+ .proc_handler = &proc_dointvec,
63935+ },
63936+#endif
63937+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63938+ {
63939+ .ctl_name = CTL_UNNUMBERED,
63940+ .procname = "audit_mount",
63941+ .data = &grsec_enable_mount,
63942+ .maxlen = sizeof(int),
63943+ .mode = 0600,
63944+ .proc_handler = &proc_dointvec,
63945+ },
63946+#endif
63947+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63948+ {
63949+ .ctl_name = CTL_UNNUMBERED,
63950+ .procname = "audit_textrel",
63951+ .data = &grsec_enable_audit_textrel,
63952+ .maxlen = sizeof(int),
63953+ .mode = 0600,
63954+ .proc_handler = &proc_dointvec,
63955+ },
63956+#endif
63957+#ifdef CONFIG_GRKERNSEC_DMESG
63958+ {
63959+ .ctl_name = CTL_UNNUMBERED,
63960+ .procname = "dmesg",
63961+ .data = &grsec_enable_dmesg,
63962+ .maxlen = sizeof(int),
63963+ .mode = 0600,
63964+ .proc_handler = &proc_dointvec,
63965+ },
63966+#endif
63967+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63968+ {
63969+ .ctl_name = CTL_UNNUMBERED,
63970+ .procname = "chroot_findtask",
63971+ .data = &grsec_enable_chroot_findtask,
63972+ .maxlen = sizeof(int),
63973+ .mode = 0600,
63974+ .proc_handler = &proc_dointvec,
63975+ },
63976+#endif
63977+#ifdef CONFIG_GRKERNSEC_RESLOG
63978+ {
63979+ .ctl_name = CTL_UNNUMBERED,
63980+ .procname = "resource_logging",
63981+ .data = &grsec_resource_logging,
63982+ .maxlen = sizeof(int),
63983+ .mode = 0600,
63984+ .proc_handler = &proc_dointvec,
63985+ },
63986+#endif
63987+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63988+ {
63989+ .ctl_name = CTL_UNNUMBERED,
63990+ .procname = "audit_ptrace",
63991+ .data = &grsec_enable_audit_ptrace,
63992+ .maxlen = sizeof(int),
63993+ .mode = 0600,
63994+ .proc_handler = &proc_dointvec,
63995+ },
63996+#endif
63997+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63998+ {
63999+ .ctl_name = CTL_UNNUMBERED,
64000+ .procname = "harden_ptrace",
64001+ .data = &grsec_enable_harden_ptrace,
64002+ .maxlen = sizeof(int),
64003+ .mode = 0600,
64004+ .proc_handler = &proc_dointvec,
64005+ },
64006+#endif
64007+ {
64008+ .ctl_name = CTL_UNNUMBERED,
64009+ .procname = "grsec_lock",
64010+ .data = &grsec_lock,
64011+ .maxlen = sizeof(int),
64012+ .mode = 0600,
64013+ .proc_handler = &proc_dointvec,
64014+ },
64015+#endif
64016+#ifdef CONFIG_GRKERNSEC_ROFS
64017+ {
64018+ .ctl_name = CTL_UNNUMBERED,
64019+ .procname = "romount_protect",
64020+ .data = &grsec_enable_rofs,
64021+ .maxlen = sizeof(int),
64022+ .mode = 0600,
64023+ .proc_handler = &proc_dointvec_minmax,
64024+ .extra1 = &one,
64025+ .extra2 = &one,
64026+ },
64027+#endif
64028+ { .ctl_name = 0 }
64029+};
64030+#endif
64031diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64032new file mode 100644
64033index 0000000..0dc13c3
64034--- /dev/null
64035+++ b/grsecurity/grsec_time.c
64036@@ -0,0 +1,16 @@
64037+#include <linux/kernel.h>
64038+#include <linux/sched.h>
64039+#include <linux/grinternal.h>
64040+#include <linux/module.h>
64041+
64042+void
64043+gr_log_timechange(void)
64044+{
64045+#ifdef CONFIG_GRKERNSEC_TIME
64046+ if (grsec_enable_time)
64047+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64048+#endif
64049+ return;
64050+}
64051+
64052+EXPORT_SYMBOL(gr_log_timechange);
64053diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64054new file mode 100644
64055index 0000000..4a78774
64056--- /dev/null
64057+++ b/grsecurity/grsec_tpe.c
64058@@ -0,0 +1,39 @@
64059+#include <linux/kernel.h>
64060+#include <linux/sched.h>
64061+#include <linux/file.h>
64062+#include <linux/fs.h>
64063+#include <linux/grinternal.h>
64064+
64065+extern int gr_acl_tpe_check(void);
64066+
64067+int
64068+gr_tpe_allow(const struct file *file)
64069+{
64070+#ifdef CONFIG_GRKERNSEC
64071+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64072+ const struct cred *cred = current_cred();
64073+
64074+ if (cred->uid && ((grsec_enable_tpe &&
64075+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64076+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
64077+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
64078+#else
64079+ in_group_p(grsec_tpe_gid)
64080+#endif
64081+ ) || gr_acl_tpe_check()) &&
64082+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
64083+ (inode->i_mode & S_IWOTH))))) {
64084+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64085+ return 0;
64086+ }
64087+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64088+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
64089+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
64090+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
64091+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64092+ return 0;
64093+ }
64094+#endif
64095+#endif
64096+ return 1;
64097+}
64098diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64099new file mode 100644
64100index 0000000..9f7b1ac
64101--- /dev/null
64102+++ b/grsecurity/grsum.c
64103@@ -0,0 +1,61 @@
64104+#include <linux/err.h>
64105+#include <linux/kernel.h>
64106+#include <linux/sched.h>
64107+#include <linux/mm.h>
64108+#include <linux/scatterlist.h>
64109+#include <linux/crypto.h>
64110+#include <linux/gracl.h>
64111+
64112+
64113+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64114+#error "crypto and sha256 must be built into the kernel"
64115+#endif
64116+
64117+int
64118+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64119+{
64120+ char *p;
64121+ struct crypto_hash *tfm;
64122+ struct hash_desc desc;
64123+ struct scatterlist sg;
64124+ unsigned char temp_sum[GR_SHA_LEN];
64125+ volatile int retval = 0;
64126+ volatile int dummy = 0;
64127+ unsigned int i;
64128+
64129+ sg_init_table(&sg, 1);
64130+
64131+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64132+ if (IS_ERR(tfm)) {
64133+ /* should never happen, since sha256 should be built in */
64134+ return 1;
64135+ }
64136+
64137+ desc.tfm = tfm;
64138+ desc.flags = 0;
64139+
64140+ crypto_hash_init(&desc);
64141+
64142+ p = salt;
64143+ sg_set_buf(&sg, p, GR_SALT_LEN);
64144+ crypto_hash_update(&desc, &sg, sg.length);
64145+
64146+ p = entry->pw;
64147+ sg_set_buf(&sg, p, strlen(p));
64148+
64149+ crypto_hash_update(&desc, &sg, sg.length);
64150+
64151+ crypto_hash_final(&desc, temp_sum);
64152+
64153+ memset(entry->pw, 0, GR_PW_LEN);
64154+
64155+ for (i = 0; i < GR_SHA_LEN; i++)
64156+ if (sum[i] != temp_sum[i])
64157+ retval = 1;
64158+ else
64159+ dummy = 1; // waste a cycle
64160+
64161+ crypto_free_hash(tfm);
64162+
64163+ return retval;
64164+}
64165diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
64166index 3cd9ccd..fe16d47 100644
64167--- a/include/acpi/acpi_bus.h
64168+++ b/include/acpi/acpi_bus.h
64169@@ -107,7 +107,7 @@ struct acpi_device_ops {
64170 acpi_op_bind bind;
64171 acpi_op_unbind unbind;
64172 acpi_op_notify notify;
64173-};
64174+} __no_const;
64175
64176 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
64177
64178diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
64179index f4906f6..71feb73 100644
64180--- a/include/acpi/acpi_drivers.h
64181+++ b/include/acpi/acpi_drivers.h
64182@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
64183 Dock Station
64184 -------------------------------------------------------------------------- */
64185 struct acpi_dock_ops {
64186- acpi_notify_handler handler;
64187- acpi_notify_handler uevent;
64188+ const acpi_notify_handler handler;
64189+ const acpi_notify_handler uevent;
64190 };
64191
64192 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
64193@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
64194 extern int register_dock_notifier(struct notifier_block *nb);
64195 extern void unregister_dock_notifier(struct notifier_block *nb);
64196 extern int register_hotplug_dock_device(acpi_handle handle,
64197- struct acpi_dock_ops *ops,
64198+ const struct acpi_dock_ops *ops,
64199 void *context);
64200 extern void unregister_hotplug_dock_device(acpi_handle handle);
64201 #else
64202@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
64203 {
64204 }
64205 static inline int register_hotplug_dock_device(acpi_handle handle,
64206- struct acpi_dock_ops *ops,
64207+ const struct acpi_dock_ops *ops,
64208 void *context)
64209 {
64210 return -ENODEV;
64211diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
64212index b7babf0..a9ac9fc 100644
64213--- a/include/asm-generic/atomic-long.h
64214+++ b/include/asm-generic/atomic-long.h
64215@@ -22,6 +22,12 @@
64216
64217 typedef atomic64_t atomic_long_t;
64218
64219+#ifdef CONFIG_PAX_REFCOUNT
64220+typedef atomic64_unchecked_t atomic_long_unchecked_t;
64221+#else
64222+typedef atomic64_t atomic_long_unchecked_t;
64223+#endif
64224+
64225 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
64226
64227 static inline long atomic_long_read(atomic_long_t *l)
64228@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64229 return (long)atomic64_read(v);
64230 }
64231
64232+#ifdef CONFIG_PAX_REFCOUNT
64233+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64234+{
64235+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64236+
64237+ return (long)atomic64_read_unchecked(v);
64238+}
64239+#endif
64240+
64241 static inline void atomic_long_set(atomic_long_t *l, long i)
64242 {
64243 atomic64_t *v = (atomic64_t *)l;
64244@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64245 atomic64_set(v, i);
64246 }
64247
64248+#ifdef CONFIG_PAX_REFCOUNT
64249+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64250+{
64251+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64252+
64253+ atomic64_set_unchecked(v, i);
64254+}
64255+#endif
64256+
64257 static inline void atomic_long_inc(atomic_long_t *l)
64258 {
64259 atomic64_t *v = (atomic64_t *)l;
64260@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64261 atomic64_inc(v);
64262 }
64263
64264+#ifdef CONFIG_PAX_REFCOUNT
64265+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64266+{
64267+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64268+
64269+ atomic64_inc_unchecked(v);
64270+}
64271+#endif
64272+
64273 static inline void atomic_long_dec(atomic_long_t *l)
64274 {
64275 atomic64_t *v = (atomic64_t *)l;
64276@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64277 atomic64_dec(v);
64278 }
64279
64280+#ifdef CONFIG_PAX_REFCOUNT
64281+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64282+{
64283+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64284+
64285+ atomic64_dec_unchecked(v);
64286+}
64287+#endif
64288+
64289 static inline void atomic_long_add(long i, atomic_long_t *l)
64290 {
64291 atomic64_t *v = (atomic64_t *)l;
64292@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64293 atomic64_add(i, v);
64294 }
64295
64296+#ifdef CONFIG_PAX_REFCOUNT
64297+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64298+{
64299+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64300+
64301+ atomic64_add_unchecked(i, v);
64302+}
64303+#endif
64304+
64305 static inline void atomic_long_sub(long i, atomic_long_t *l)
64306 {
64307 atomic64_t *v = (atomic64_t *)l;
64308@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64309 return (long)atomic64_inc_return(v);
64310 }
64311
64312+#ifdef CONFIG_PAX_REFCOUNT
64313+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64314+{
64315+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64316+
64317+ return (long)atomic64_inc_return_unchecked(v);
64318+}
64319+#endif
64320+
64321 static inline long atomic_long_dec_return(atomic_long_t *l)
64322 {
64323 atomic64_t *v = (atomic64_t *)l;
64324@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64325
64326 typedef atomic_t atomic_long_t;
64327
64328+#ifdef CONFIG_PAX_REFCOUNT
64329+typedef atomic_unchecked_t atomic_long_unchecked_t;
64330+#else
64331+typedef atomic_t atomic_long_unchecked_t;
64332+#endif
64333+
64334 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
64335 static inline long atomic_long_read(atomic_long_t *l)
64336 {
64337@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64338 return (long)atomic_read(v);
64339 }
64340
64341+#ifdef CONFIG_PAX_REFCOUNT
64342+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64343+{
64344+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64345+
64346+ return (long)atomic_read_unchecked(v);
64347+}
64348+#endif
64349+
64350 static inline void atomic_long_set(atomic_long_t *l, long i)
64351 {
64352 atomic_t *v = (atomic_t *)l;
64353@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64354 atomic_set(v, i);
64355 }
64356
64357+#ifdef CONFIG_PAX_REFCOUNT
64358+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64359+{
64360+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64361+
64362+ atomic_set_unchecked(v, i);
64363+}
64364+#endif
64365+
64366 static inline void atomic_long_inc(atomic_long_t *l)
64367 {
64368 atomic_t *v = (atomic_t *)l;
64369@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64370 atomic_inc(v);
64371 }
64372
64373+#ifdef CONFIG_PAX_REFCOUNT
64374+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64375+{
64376+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64377+
64378+ atomic_inc_unchecked(v);
64379+}
64380+#endif
64381+
64382 static inline void atomic_long_dec(atomic_long_t *l)
64383 {
64384 atomic_t *v = (atomic_t *)l;
64385@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64386 atomic_dec(v);
64387 }
64388
64389+#ifdef CONFIG_PAX_REFCOUNT
64390+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64391+{
64392+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64393+
64394+ atomic_dec_unchecked(v);
64395+}
64396+#endif
64397+
64398 static inline void atomic_long_add(long i, atomic_long_t *l)
64399 {
64400 atomic_t *v = (atomic_t *)l;
64401@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64402 atomic_add(i, v);
64403 }
64404
64405+#ifdef CONFIG_PAX_REFCOUNT
64406+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64407+{
64408+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64409+
64410+ atomic_add_unchecked(i, v);
64411+}
64412+#endif
64413+
64414 static inline void atomic_long_sub(long i, atomic_long_t *l)
64415 {
64416 atomic_t *v = (atomic_t *)l;
64417@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64418 return (long)atomic_inc_return(v);
64419 }
64420
64421+#ifdef CONFIG_PAX_REFCOUNT
64422+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64423+{
64424+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64425+
64426+ return (long)atomic_inc_return_unchecked(v);
64427+}
64428+#endif
64429+
64430 static inline long atomic_long_dec_return(atomic_long_t *l)
64431 {
64432 atomic_t *v = (atomic_t *)l;
64433@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64434
64435 #endif /* BITS_PER_LONG == 64 */
64436
64437+#ifdef CONFIG_PAX_REFCOUNT
64438+static inline void pax_refcount_needs_these_functions(void)
64439+{
64440+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
64441+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
64442+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
64443+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
64444+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
64445+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
64446+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
64447+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
64448+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
64449+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
64450+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
64451+
64452+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
64453+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
64454+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
64455+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
64456+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
64457+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
64458+}
64459+#else
64460+#define atomic_read_unchecked(v) atomic_read(v)
64461+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
64462+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
64463+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
64464+#define atomic_inc_unchecked(v) atomic_inc(v)
64465+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
64466+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
64467+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
64468+#define atomic_dec_unchecked(v) atomic_dec(v)
64469+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
64470+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
64471+
64472+#define atomic_long_read_unchecked(v) atomic_long_read(v)
64473+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
64474+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
64475+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
64476+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
64477+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
64478+#endif
64479+
64480 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
64481diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
64482index d48ddf0..656a0ac 100644
64483--- a/include/asm-generic/bug.h
64484+++ b/include/asm-generic/bug.h
64485@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
64486
64487 #else /* !CONFIG_BUG */
64488 #ifndef HAVE_ARCH_BUG
64489-#define BUG() do {} while(0)
64490+#define BUG() do { for (;;) ; } while(0)
64491 #endif
64492
64493 #ifndef HAVE_ARCH_BUG_ON
64494-#define BUG_ON(condition) do { if (condition) ; } while(0)
64495+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
64496 #endif
64497
64498 #ifndef HAVE_ARCH_WARN_ON
64499diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
64500index 1bfcfe5..e04c5c9 100644
64501--- a/include/asm-generic/cache.h
64502+++ b/include/asm-generic/cache.h
64503@@ -6,7 +6,7 @@
64504 * cache lines need to provide their own cache.h.
64505 */
64506
64507-#define L1_CACHE_SHIFT 5
64508-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
64509+#define L1_CACHE_SHIFT 5UL
64510+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
64511
64512 #endif /* __ASM_GENERIC_CACHE_H */
64513diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
64514index 6920695..41038bc 100644
64515--- a/include/asm-generic/dma-mapping-common.h
64516+++ b/include/asm-generic/dma-mapping-common.h
64517@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
64518 enum dma_data_direction dir,
64519 struct dma_attrs *attrs)
64520 {
64521- struct dma_map_ops *ops = get_dma_ops(dev);
64522+ const struct dma_map_ops *ops = get_dma_ops(dev);
64523 dma_addr_t addr;
64524
64525 kmemcheck_mark_initialized(ptr, size);
64526@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
64527 enum dma_data_direction dir,
64528 struct dma_attrs *attrs)
64529 {
64530- struct dma_map_ops *ops = get_dma_ops(dev);
64531+ const struct dma_map_ops *ops = get_dma_ops(dev);
64532
64533 BUG_ON(!valid_dma_direction(dir));
64534 if (ops->unmap_page)
64535@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
64536 int nents, enum dma_data_direction dir,
64537 struct dma_attrs *attrs)
64538 {
64539- struct dma_map_ops *ops = get_dma_ops(dev);
64540+ const struct dma_map_ops *ops = get_dma_ops(dev);
64541 int i, ents;
64542 struct scatterlist *s;
64543
64544@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
64545 int nents, enum dma_data_direction dir,
64546 struct dma_attrs *attrs)
64547 {
64548- struct dma_map_ops *ops = get_dma_ops(dev);
64549+ const struct dma_map_ops *ops = get_dma_ops(dev);
64550
64551 BUG_ON(!valid_dma_direction(dir));
64552 debug_dma_unmap_sg(dev, sg, nents, dir);
64553@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64554 size_t offset, size_t size,
64555 enum dma_data_direction dir)
64556 {
64557- struct dma_map_ops *ops = get_dma_ops(dev);
64558+ const struct dma_map_ops *ops = get_dma_ops(dev);
64559 dma_addr_t addr;
64560
64561 kmemcheck_mark_initialized(page_address(page) + offset, size);
64562@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64563 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
64564 size_t size, enum dma_data_direction dir)
64565 {
64566- struct dma_map_ops *ops = get_dma_ops(dev);
64567+ const struct dma_map_ops *ops = get_dma_ops(dev);
64568
64569 BUG_ON(!valid_dma_direction(dir));
64570 if (ops->unmap_page)
64571@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
64572 size_t size,
64573 enum dma_data_direction dir)
64574 {
64575- struct dma_map_ops *ops = get_dma_ops(dev);
64576+ const struct dma_map_ops *ops = get_dma_ops(dev);
64577
64578 BUG_ON(!valid_dma_direction(dir));
64579 if (ops->sync_single_for_cpu)
64580@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
64581 dma_addr_t addr, size_t size,
64582 enum dma_data_direction dir)
64583 {
64584- struct dma_map_ops *ops = get_dma_ops(dev);
64585+ const struct dma_map_ops *ops = get_dma_ops(dev);
64586
64587 BUG_ON(!valid_dma_direction(dir));
64588 if (ops->sync_single_for_device)
64589@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
64590 size_t size,
64591 enum dma_data_direction dir)
64592 {
64593- struct dma_map_ops *ops = get_dma_ops(dev);
64594+ const struct dma_map_ops *ops = get_dma_ops(dev);
64595
64596 BUG_ON(!valid_dma_direction(dir));
64597 if (ops->sync_single_range_for_cpu) {
64598@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
64599 size_t size,
64600 enum dma_data_direction dir)
64601 {
64602- struct dma_map_ops *ops = get_dma_ops(dev);
64603+ const struct dma_map_ops *ops = get_dma_ops(dev);
64604
64605 BUG_ON(!valid_dma_direction(dir));
64606 if (ops->sync_single_range_for_device) {
64607@@ -155,7 +155,7 @@ static inline void
64608 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
64609 int nelems, enum dma_data_direction dir)
64610 {
64611- struct dma_map_ops *ops = get_dma_ops(dev);
64612+ const struct dma_map_ops *ops = get_dma_ops(dev);
64613
64614 BUG_ON(!valid_dma_direction(dir));
64615 if (ops->sync_sg_for_cpu)
64616@@ -167,7 +167,7 @@ static inline void
64617 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
64618 int nelems, enum dma_data_direction dir)
64619 {
64620- struct dma_map_ops *ops = get_dma_ops(dev);
64621+ const struct dma_map_ops *ops = get_dma_ops(dev);
64622
64623 BUG_ON(!valid_dma_direction(dir));
64624 if (ops->sync_sg_for_device)
64625diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
64626index 0d68a1e..b74a761 100644
64627--- a/include/asm-generic/emergency-restart.h
64628+++ b/include/asm-generic/emergency-restart.h
64629@@ -1,7 +1,7 @@
64630 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
64631 #define _ASM_GENERIC_EMERGENCY_RESTART_H
64632
64633-static inline void machine_emergency_restart(void)
64634+static inline __noreturn void machine_emergency_restart(void)
64635 {
64636 machine_restart(NULL);
64637 }
64638diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
64639index 3c2344f..4590a7d 100644
64640--- a/include/asm-generic/futex.h
64641+++ b/include/asm-generic/futex.h
64642@@ -6,7 +6,7 @@
64643 #include <asm/errno.h>
64644
64645 static inline int
64646-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64647+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
64648 {
64649 int op = (encoded_op >> 28) & 7;
64650 int cmp = (encoded_op >> 24) & 15;
64651@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64652 }
64653
64654 static inline int
64655-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
64656+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
64657 {
64658 return -ENOSYS;
64659 }
64660diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
64661index 1ca3efc..e3dc852 100644
64662--- a/include/asm-generic/int-l64.h
64663+++ b/include/asm-generic/int-l64.h
64664@@ -46,6 +46,8 @@ typedef unsigned int u32;
64665 typedef signed long s64;
64666 typedef unsigned long u64;
64667
64668+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
64669+
64670 #define S8_C(x) x
64671 #define U8_C(x) x ## U
64672 #define S16_C(x) x
64673diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
64674index f394147..b6152b9 100644
64675--- a/include/asm-generic/int-ll64.h
64676+++ b/include/asm-generic/int-ll64.h
64677@@ -51,6 +51,8 @@ typedef unsigned int u32;
64678 typedef signed long long s64;
64679 typedef unsigned long long u64;
64680
64681+typedef unsigned long long intoverflow_t;
64682+
64683 #define S8_C(x) x
64684 #define U8_C(x) x ## U
64685 #define S16_C(x) x
64686diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
64687index e5f234a..cdb16b3 100644
64688--- a/include/asm-generic/kmap_types.h
64689+++ b/include/asm-generic/kmap_types.h
64690@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
64691 KMAP_D(16) KM_IRQ_PTE,
64692 KMAP_D(17) KM_NMI,
64693 KMAP_D(18) KM_NMI_PTE,
64694-KMAP_D(19) KM_TYPE_NR
64695+KMAP_D(19) KM_CLEARPAGE,
64696+KMAP_D(20) KM_TYPE_NR
64697 };
64698
64699 #undef KMAP_D
64700diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
64701index 725612b..9cc513a 100644
64702--- a/include/asm-generic/pgtable-nopmd.h
64703+++ b/include/asm-generic/pgtable-nopmd.h
64704@@ -1,14 +1,19 @@
64705 #ifndef _PGTABLE_NOPMD_H
64706 #define _PGTABLE_NOPMD_H
64707
64708-#ifndef __ASSEMBLY__
64709-
64710 #include <asm-generic/pgtable-nopud.h>
64711
64712-struct mm_struct;
64713-
64714 #define __PAGETABLE_PMD_FOLDED
64715
64716+#define PMD_SHIFT PUD_SHIFT
64717+#define PTRS_PER_PMD 1
64718+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
64719+#define PMD_MASK (~(PMD_SIZE-1))
64720+
64721+#ifndef __ASSEMBLY__
64722+
64723+struct mm_struct;
64724+
64725 /*
64726 * Having the pmd type consist of a pud gets the size right, and allows
64727 * us to conceptually access the pud entry that this pmd is folded into
64728@@ -16,11 +21,6 @@ struct mm_struct;
64729 */
64730 typedef struct { pud_t pud; } pmd_t;
64731
64732-#define PMD_SHIFT PUD_SHIFT
64733-#define PTRS_PER_PMD 1
64734-#define PMD_SIZE (1UL << PMD_SHIFT)
64735-#define PMD_MASK (~(PMD_SIZE-1))
64736-
64737 /*
64738 * The "pud_xxx()" functions here are trivial for a folded two-level
64739 * setup: the pmd is never bad, and a pmd always exists (as it's folded
64740diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
64741index 810431d..ccc3638 100644
64742--- a/include/asm-generic/pgtable-nopud.h
64743+++ b/include/asm-generic/pgtable-nopud.h
64744@@ -1,10 +1,15 @@
64745 #ifndef _PGTABLE_NOPUD_H
64746 #define _PGTABLE_NOPUD_H
64747
64748-#ifndef __ASSEMBLY__
64749-
64750 #define __PAGETABLE_PUD_FOLDED
64751
64752+#define PUD_SHIFT PGDIR_SHIFT
64753+#define PTRS_PER_PUD 1
64754+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
64755+#define PUD_MASK (~(PUD_SIZE-1))
64756+
64757+#ifndef __ASSEMBLY__
64758+
64759 /*
64760 * Having the pud type consist of a pgd gets the size right, and allows
64761 * us to conceptually access the pgd entry that this pud is folded into
64762@@ -12,11 +17,6 @@
64763 */
64764 typedef struct { pgd_t pgd; } pud_t;
64765
64766-#define PUD_SHIFT PGDIR_SHIFT
64767-#define PTRS_PER_PUD 1
64768-#define PUD_SIZE (1UL << PUD_SHIFT)
64769-#define PUD_MASK (~(PUD_SIZE-1))
64770-
64771 /*
64772 * The "pgd_xxx()" functions here are trivial for a folded two-level
64773 * setup: the pud is never bad, and a pud always exists (as it's folded
64774diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
64775index e2bd73e..fea8ed3 100644
64776--- a/include/asm-generic/pgtable.h
64777+++ b/include/asm-generic/pgtable.h
64778@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
64779 unsigned long size);
64780 #endif
64781
64782+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
64783+static inline unsigned long pax_open_kernel(void) { return 0; }
64784+#endif
64785+
64786+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
64787+static inline unsigned long pax_close_kernel(void) { return 0; }
64788+#endif
64789+
64790 #endif /* !__ASSEMBLY__ */
64791
64792 #endif /* _ASM_GENERIC_PGTABLE_H */
64793diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
64794index b6e818f..21aa58a 100644
64795--- a/include/asm-generic/vmlinux.lds.h
64796+++ b/include/asm-generic/vmlinux.lds.h
64797@@ -199,6 +199,7 @@
64798 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
64799 VMLINUX_SYMBOL(__start_rodata) = .; \
64800 *(.rodata) *(.rodata.*) \
64801+ *(.data.read_only) \
64802 *(__vermagic) /* Kernel version magic */ \
64803 *(__markers_strings) /* Markers: strings */ \
64804 *(__tracepoints_strings)/* Tracepoints: strings */ \
64805@@ -656,22 +657,24 @@
64806 * section in the linker script will go there too. @phdr should have
64807 * a leading colon.
64808 *
64809- * Note that this macros defines __per_cpu_load as an absolute symbol.
64810+ * Note that this macros defines per_cpu_load as an absolute symbol.
64811 * If there is no need to put the percpu section at a predetermined
64812 * address, use PERCPU().
64813 */
64814 #define PERCPU_VADDR(vaddr, phdr) \
64815- VMLINUX_SYMBOL(__per_cpu_load) = .; \
64816- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
64817+ per_cpu_load = .; \
64818+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
64819 - LOAD_OFFSET) { \
64820+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
64821 VMLINUX_SYMBOL(__per_cpu_start) = .; \
64822 *(.data.percpu.first) \
64823- *(.data.percpu.page_aligned) \
64824 *(.data.percpu) \
64825+ . = ALIGN(PAGE_SIZE); \
64826+ *(.data.percpu.page_aligned) \
64827 *(.data.percpu.shared_aligned) \
64828 VMLINUX_SYMBOL(__per_cpu_end) = .; \
64829 } phdr \
64830- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
64831+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
64832
64833 /**
64834 * PERCPU - define output section for percpu area, simple version
64835diff --git a/include/drm/drmP.h b/include/drm/drmP.h
64836index 66713c6..98c0460 100644
64837--- a/include/drm/drmP.h
64838+++ b/include/drm/drmP.h
64839@@ -71,6 +71,7 @@
64840 #include <linux/workqueue.h>
64841 #include <linux/poll.h>
64842 #include <asm/pgalloc.h>
64843+#include <asm/local.h>
64844 #include "drm.h"
64845
64846 #include <linux/idr.h>
64847@@ -814,7 +815,7 @@ struct drm_driver {
64848 void (*vgaarb_irq)(struct drm_device *dev, bool state);
64849
64850 /* Driver private ops for this object */
64851- struct vm_operations_struct *gem_vm_ops;
64852+ const struct vm_operations_struct *gem_vm_ops;
64853
64854 int major;
64855 int minor;
64856@@ -917,7 +918,7 @@ struct drm_device {
64857
64858 /** \name Usage Counters */
64859 /*@{ */
64860- int open_count; /**< Outstanding files open */
64861+ local_t open_count; /**< Outstanding files open */
64862 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
64863 atomic_t vma_count; /**< Outstanding vma areas open */
64864 int buf_use; /**< Buffers in use -- cannot alloc */
64865@@ -928,7 +929,7 @@ struct drm_device {
64866 /*@{ */
64867 unsigned long counters;
64868 enum drm_stat_type types[15];
64869- atomic_t counts[15];
64870+ atomic_unchecked_t counts[15];
64871 /*@} */
64872
64873 struct list_head filelist;
64874@@ -1016,7 +1017,7 @@ struct drm_device {
64875 struct pci_controller *hose;
64876 #endif
64877 struct drm_sg_mem *sg; /**< Scatter gather memory */
64878- unsigned int num_crtcs; /**< Number of CRTCs on this device */
64879+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
64880 void *dev_private; /**< device private data */
64881 void *mm_private;
64882 struct address_space *dev_mapping;
64883@@ -1042,11 +1043,11 @@ struct drm_device {
64884 spinlock_t object_name_lock;
64885 struct idr object_name_idr;
64886 atomic_t object_count;
64887- atomic_t object_memory;
64888+ atomic_unchecked_t object_memory;
64889 atomic_t pin_count;
64890- atomic_t pin_memory;
64891+ atomic_unchecked_t pin_memory;
64892 atomic_t gtt_count;
64893- atomic_t gtt_memory;
64894+ atomic_unchecked_t gtt_memory;
64895 uint32_t gtt_total;
64896 uint32_t invalidate_domains; /* domains pending invalidation */
64897 uint32_t flush_domains; /* domains pending flush */
64898diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
64899index b29e201..3413cc9 100644
64900--- a/include/drm/drm_crtc_helper.h
64901+++ b/include/drm/drm_crtc_helper.h
64902@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
64903
64904 /* reload the current crtc LUT */
64905 void (*load_lut)(struct drm_crtc *crtc);
64906-};
64907+} __no_const;
64908
64909 struct drm_encoder_helper_funcs {
64910 void (*dpms)(struct drm_encoder *encoder, int mode);
64911@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
64912 struct drm_connector *connector);
64913 /* disable encoder when not in use - more explicit than dpms off */
64914 void (*disable)(struct drm_encoder *encoder);
64915-};
64916+} __no_const;
64917
64918 struct drm_connector_helper_funcs {
64919 int (*get_modes)(struct drm_connector *connector);
64920diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
64921index b199170..6f9e64c 100644
64922--- a/include/drm/ttm/ttm_memory.h
64923+++ b/include/drm/ttm/ttm_memory.h
64924@@ -47,7 +47,7 @@
64925
64926 struct ttm_mem_shrink {
64927 int (*do_shrink) (struct ttm_mem_shrink *);
64928-};
64929+} __no_const;
64930
64931 /**
64932 * struct ttm_mem_global - Global memory accounting structure.
64933diff --git a/include/linux/a.out.h b/include/linux/a.out.h
64934index e86dfca..40cc55f 100644
64935--- a/include/linux/a.out.h
64936+++ b/include/linux/a.out.h
64937@@ -39,6 +39,14 @@ enum machine_type {
64938 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
64939 };
64940
64941+/* Constants for the N_FLAGS field */
64942+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
64943+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
64944+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
64945+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
64946+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
64947+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
64948+
64949 #if !defined (N_MAGIC)
64950 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
64951 #endif
64952diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
64953index 817b237..62c10bc 100644
64954--- a/include/linux/atmdev.h
64955+++ b/include/linux/atmdev.h
64956@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
64957 #endif
64958
64959 struct k_atm_aal_stats {
64960-#define __HANDLE_ITEM(i) atomic_t i
64961+#define __HANDLE_ITEM(i) atomic_unchecked_t i
64962 __AAL_STAT_ITEMS
64963 #undef __HANDLE_ITEM
64964 };
64965diff --git a/include/linux/backlight.h b/include/linux/backlight.h
64966index 0f5f578..8c4f884 100644
64967--- a/include/linux/backlight.h
64968+++ b/include/linux/backlight.h
64969@@ -36,18 +36,18 @@ struct backlight_device;
64970 struct fb_info;
64971
64972 struct backlight_ops {
64973- unsigned int options;
64974+ const unsigned int options;
64975
64976 #define BL_CORE_SUSPENDRESUME (1 << 0)
64977
64978 /* Notify the backlight driver some property has changed */
64979- int (*update_status)(struct backlight_device *);
64980+ int (* const update_status)(struct backlight_device *);
64981 /* Return the current backlight brightness (accounting for power,
64982 fb_blank etc.) */
64983- int (*get_brightness)(struct backlight_device *);
64984+ int (* const get_brightness)(struct backlight_device *);
64985 /* Check if given framebuffer device is the one bound to this backlight;
64986 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
64987- int (*check_fb)(struct fb_info *);
64988+ int (* const check_fb)(struct fb_info *);
64989 };
64990
64991 /* This structure defines all the properties of a backlight */
64992@@ -86,7 +86,7 @@ struct backlight_device {
64993 registered this device has been unloaded, and if class_get_devdata()
64994 points to something in the body of that driver, it is also invalid. */
64995 struct mutex ops_lock;
64996- struct backlight_ops *ops;
64997+ const struct backlight_ops *ops;
64998
64999 /* The framebuffer notifier block */
65000 struct notifier_block fb_notif;
65001@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65002 }
65003
65004 extern struct backlight_device *backlight_device_register(const char *name,
65005- struct device *dev, void *devdata, struct backlight_ops *ops);
65006+ struct device *dev, void *devdata, const struct backlight_ops *ops);
65007 extern void backlight_device_unregister(struct backlight_device *bd);
65008 extern void backlight_force_update(struct backlight_device *bd,
65009 enum backlight_update_reason reason);
65010diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65011index a3d802e..482f69c 100644
65012--- a/include/linux/binfmts.h
65013+++ b/include/linux/binfmts.h
65014@@ -83,6 +83,7 @@ struct linux_binfmt {
65015 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65016 int (*load_shlib)(struct file *);
65017 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65018+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65019 unsigned long min_coredump; /* minimal dump size */
65020 int hasvdso;
65021 };
65022diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65023index a06bfab..4fa38bb 100644
65024--- a/include/linux/blkdev.h
65025+++ b/include/linux/blkdev.h
65026@@ -1278,7 +1278,7 @@ struct block_device_operations {
65027 int (*revalidate_disk) (struct gendisk *);
65028 int (*getgeo)(struct block_device *, struct hd_geometry *);
65029 struct module *owner;
65030-};
65031+} __do_const;
65032
65033 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65034 unsigned long);
65035diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65036index 3b73b99..629d21b 100644
65037--- a/include/linux/blktrace_api.h
65038+++ b/include/linux/blktrace_api.h
65039@@ -160,7 +160,7 @@ struct blk_trace {
65040 struct dentry *dir;
65041 struct dentry *dropped_file;
65042 struct dentry *msg_file;
65043- atomic_t dropped;
65044+ atomic_unchecked_t dropped;
65045 };
65046
65047 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65048diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65049index 83195fb..0b0f77d 100644
65050--- a/include/linux/byteorder/little_endian.h
65051+++ b/include/linux/byteorder/little_endian.h
65052@@ -42,51 +42,51 @@
65053
65054 static inline __le64 __cpu_to_le64p(const __u64 *p)
65055 {
65056- return (__force __le64)*p;
65057+ return (__force const __le64)*p;
65058 }
65059 static inline __u64 __le64_to_cpup(const __le64 *p)
65060 {
65061- return (__force __u64)*p;
65062+ return (__force const __u64)*p;
65063 }
65064 static inline __le32 __cpu_to_le32p(const __u32 *p)
65065 {
65066- return (__force __le32)*p;
65067+ return (__force const __le32)*p;
65068 }
65069 static inline __u32 __le32_to_cpup(const __le32 *p)
65070 {
65071- return (__force __u32)*p;
65072+ return (__force const __u32)*p;
65073 }
65074 static inline __le16 __cpu_to_le16p(const __u16 *p)
65075 {
65076- return (__force __le16)*p;
65077+ return (__force const __le16)*p;
65078 }
65079 static inline __u16 __le16_to_cpup(const __le16 *p)
65080 {
65081- return (__force __u16)*p;
65082+ return (__force const __u16)*p;
65083 }
65084 static inline __be64 __cpu_to_be64p(const __u64 *p)
65085 {
65086- return (__force __be64)__swab64p(p);
65087+ return (__force const __be64)__swab64p(p);
65088 }
65089 static inline __u64 __be64_to_cpup(const __be64 *p)
65090 {
65091- return __swab64p((__u64 *)p);
65092+ return __swab64p((const __u64 *)p);
65093 }
65094 static inline __be32 __cpu_to_be32p(const __u32 *p)
65095 {
65096- return (__force __be32)__swab32p(p);
65097+ return (__force const __be32)__swab32p(p);
65098 }
65099 static inline __u32 __be32_to_cpup(const __be32 *p)
65100 {
65101- return __swab32p((__u32 *)p);
65102+ return __swab32p((const __u32 *)p);
65103 }
65104 static inline __be16 __cpu_to_be16p(const __u16 *p)
65105 {
65106- return (__force __be16)__swab16p(p);
65107+ return (__force const __be16)__swab16p(p);
65108 }
65109 static inline __u16 __be16_to_cpup(const __be16 *p)
65110 {
65111- return __swab16p((__u16 *)p);
65112+ return __swab16p((const __u16 *)p);
65113 }
65114 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
65115 #define __le64_to_cpus(x) do { (void)(x); } while (0)
65116diff --git a/include/linux/cache.h b/include/linux/cache.h
65117index 97e2488..e7576b9 100644
65118--- a/include/linux/cache.h
65119+++ b/include/linux/cache.h
65120@@ -16,6 +16,10 @@
65121 #define __read_mostly
65122 #endif
65123
65124+#ifndef __read_only
65125+#define __read_only __read_mostly
65126+#endif
65127+
65128 #ifndef ____cacheline_aligned
65129 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
65130 #endif
65131diff --git a/include/linux/capability.h b/include/linux/capability.h
65132index c8f2a5f7..1618a5c 100644
65133--- a/include/linux/capability.h
65134+++ b/include/linux/capability.h
65135@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
65136 (security_real_capable_noaudit((t), (cap)) == 0)
65137
65138 extern int capable(int cap);
65139+int capable_nolog(int cap);
65140
65141 /* audit system wants to get cap info from files as well */
65142 struct dentry;
65143diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
65144index 450fa59..86019fb 100644
65145--- a/include/linux/compiler-gcc4.h
65146+++ b/include/linux/compiler-gcc4.h
65147@@ -36,4 +36,16 @@
65148 the kernel context */
65149 #define __cold __attribute__((__cold__))
65150
65151+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
65152+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
65153+#define __bos0(ptr) __bos((ptr), 0)
65154+#define __bos1(ptr) __bos((ptr), 1)
65155+
65156+#if __GNUC_MINOR__ >= 5
65157+#ifdef CONSTIFY_PLUGIN
65158+#define __no_const __attribute__((no_const))
65159+#define __do_const __attribute__((do_const))
65160+#endif
65161+#endif
65162+
65163 #endif
65164diff --git a/include/linux/compiler.h b/include/linux/compiler.h
65165index 04fb513..fd6477b 100644
65166--- a/include/linux/compiler.h
65167+++ b/include/linux/compiler.h
65168@@ -5,11 +5,14 @@
65169
65170 #ifdef __CHECKER__
65171 # define __user __attribute__((noderef, address_space(1)))
65172+# define __force_user __force __user
65173 # define __kernel /* default address space */
65174+# define __force_kernel __force __kernel
65175 # define __safe __attribute__((safe))
65176 # define __force __attribute__((force))
65177 # define __nocast __attribute__((nocast))
65178 # define __iomem __attribute__((noderef, address_space(2)))
65179+# define __force_iomem __force __iomem
65180 # define __acquires(x) __attribute__((context(x,0,1)))
65181 # define __releases(x) __attribute__((context(x,1,0)))
65182 # define __acquire(x) __context__(x,1)
65183@@ -17,13 +20,34 @@
65184 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
65185 extern void __chk_user_ptr(const volatile void __user *);
65186 extern void __chk_io_ptr(const volatile void __iomem *);
65187+#elif defined(CHECKER_PLUGIN)
65188+//# define __user
65189+//# define __force_user
65190+//# define __kernel
65191+//# define __force_kernel
65192+# define __safe
65193+# define __force
65194+# define __nocast
65195+# define __iomem
65196+# define __force_iomem
65197+# define __chk_user_ptr(x) (void)0
65198+# define __chk_io_ptr(x) (void)0
65199+# define __builtin_warning(x, y...) (1)
65200+# define __acquires(x)
65201+# define __releases(x)
65202+# define __acquire(x) (void)0
65203+# define __release(x) (void)0
65204+# define __cond_lock(x,c) (c)
65205 #else
65206 # define __user
65207+# define __force_user
65208 # define __kernel
65209+# define __force_kernel
65210 # define __safe
65211 # define __force
65212 # define __nocast
65213 # define __iomem
65214+# define __force_iomem
65215 # define __chk_user_ptr(x) (void)0
65216 # define __chk_io_ptr(x) (void)0
65217 # define __builtin_warning(x, y...) (1)
65218@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65219 # define __attribute_const__ /* unimplemented */
65220 #endif
65221
65222+#ifndef __no_const
65223+# define __no_const
65224+#endif
65225+
65226+#ifndef __do_const
65227+# define __do_const
65228+#endif
65229+
65230 /*
65231 * Tell gcc if a function is cold. The compiler will assume any path
65232 * directly leading to the call is unlikely.
65233@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65234 #define __cold
65235 #endif
65236
65237+#ifndef __alloc_size
65238+#define __alloc_size(...)
65239+#endif
65240+
65241+#ifndef __bos
65242+#define __bos(ptr, arg)
65243+#endif
65244+
65245+#ifndef __bos0
65246+#define __bos0(ptr)
65247+#endif
65248+
65249+#ifndef __bos1
65250+#define __bos1(ptr)
65251+#endif
65252+
65253 /* Simple shorthand for a section definition */
65254 #ifndef __section
65255 # define __section(S) __attribute__ ((__section__(#S)))
65256@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65257 * use is to mediate communication between process-level code and irq/NMI
65258 * handlers, all running on the same CPU.
65259 */
65260-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
65261+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
65262+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
65263
65264 #endif /* __LINUX_COMPILER_H */
65265diff --git a/include/linux/crypto.h b/include/linux/crypto.h
65266index fd92988..a3164bd 100644
65267--- a/include/linux/crypto.h
65268+++ b/include/linux/crypto.h
65269@@ -394,7 +394,7 @@ struct cipher_tfm {
65270 const u8 *key, unsigned int keylen);
65271 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65272 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65273-};
65274+} __no_const;
65275
65276 struct hash_tfm {
65277 int (*init)(struct hash_desc *desc);
65278@@ -415,13 +415,13 @@ struct compress_tfm {
65279 int (*cot_decompress)(struct crypto_tfm *tfm,
65280 const u8 *src, unsigned int slen,
65281 u8 *dst, unsigned int *dlen);
65282-};
65283+} __no_const;
65284
65285 struct rng_tfm {
65286 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
65287 unsigned int dlen);
65288 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
65289-};
65290+} __no_const;
65291
65292 #define crt_ablkcipher crt_u.ablkcipher
65293 #define crt_aead crt_u.aead
65294diff --git a/include/linux/dcache.h b/include/linux/dcache.h
65295index 30b93b2..cd7a8db 100644
65296--- a/include/linux/dcache.h
65297+++ b/include/linux/dcache.h
65298@@ -119,6 +119,8 @@ struct dentry {
65299 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
65300 };
65301
65302+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
65303+
65304 /*
65305 * dentry->d_lock spinlock nesting subclasses:
65306 *
65307diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
65308index 3e9bd6a..f4e1aa0 100644
65309--- a/include/linux/decompress/mm.h
65310+++ b/include/linux/decompress/mm.h
65311@@ -78,7 +78,7 @@ static void free(void *where)
65312 * warnings when not needed (indeed large_malloc / large_free are not
65313 * needed by inflate */
65314
65315-#define malloc(a) kmalloc(a, GFP_KERNEL)
65316+#define malloc(a) kmalloc((a), GFP_KERNEL)
65317 #define free(a) kfree(a)
65318
65319 #define large_malloc(a) vmalloc(a)
65320diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
65321index 91b7618..92a93d32 100644
65322--- a/include/linux/dma-mapping.h
65323+++ b/include/linux/dma-mapping.h
65324@@ -16,51 +16,51 @@ enum dma_data_direction {
65325 };
65326
65327 struct dma_map_ops {
65328- void* (*alloc_coherent)(struct device *dev, size_t size,
65329+ void* (* const alloc_coherent)(struct device *dev, size_t size,
65330 dma_addr_t *dma_handle, gfp_t gfp);
65331- void (*free_coherent)(struct device *dev, size_t size,
65332+ void (* const free_coherent)(struct device *dev, size_t size,
65333 void *vaddr, dma_addr_t dma_handle);
65334- dma_addr_t (*map_page)(struct device *dev, struct page *page,
65335+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
65336 unsigned long offset, size_t size,
65337 enum dma_data_direction dir,
65338 struct dma_attrs *attrs);
65339- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
65340+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
65341 size_t size, enum dma_data_direction dir,
65342 struct dma_attrs *attrs);
65343- int (*map_sg)(struct device *dev, struct scatterlist *sg,
65344+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
65345 int nents, enum dma_data_direction dir,
65346 struct dma_attrs *attrs);
65347- void (*unmap_sg)(struct device *dev,
65348+ void (* const unmap_sg)(struct device *dev,
65349 struct scatterlist *sg, int nents,
65350 enum dma_data_direction dir,
65351 struct dma_attrs *attrs);
65352- void (*sync_single_for_cpu)(struct device *dev,
65353+ void (* const sync_single_for_cpu)(struct device *dev,
65354 dma_addr_t dma_handle, size_t size,
65355 enum dma_data_direction dir);
65356- void (*sync_single_for_device)(struct device *dev,
65357+ void (* const sync_single_for_device)(struct device *dev,
65358 dma_addr_t dma_handle, size_t size,
65359 enum dma_data_direction dir);
65360- void (*sync_single_range_for_cpu)(struct device *dev,
65361+ void (* const sync_single_range_for_cpu)(struct device *dev,
65362 dma_addr_t dma_handle,
65363 unsigned long offset,
65364 size_t size,
65365 enum dma_data_direction dir);
65366- void (*sync_single_range_for_device)(struct device *dev,
65367+ void (* const sync_single_range_for_device)(struct device *dev,
65368 dma_addr_t dma_handle,
65369 unsigned long offset,
65370 size_t size,
65371 enum dma_data_direction dir);
65372- void (*sync_sg_for_cpu)(struct device *dev,
65373+ void (* const sync_sg_for_cpu)(struct device *dev,
65374 struct scatterlist *sg, int nents,
65375 enum dma_data_direction dir);
65376- void (*sync_sg_for_device)(struct device *dev,
65377+ void (* const sync_sg_for_device)(struct device *dev,
65378 struct scatterlist *sg, int nents,
65379 enum dma_data_direction dir);
65380- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
65381- int (*dma_supported)(struct device *dev, u64 mask);
65382+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
65383+ int (* const dma_supported)(struct device *dev, u64 mask);
65384 int (*set_dma_mask)(struct device *dev, u64 mask);
65385 int is_phys;
65386-};
65387+} __do_const;
65388
65389 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
65390
65391diff --git a/include/linux/dst.h b/include/linux/dst.h
65392index e26fed8..b976d9f 100644
65393--- a/include/linux/dst.h
65394+++ b/include/linux/dst.h
65395@@ -380,7 +380,7 @@ struct dst_node
65396 struct thread_pool *pool;
65397
65398 /* Transaction IDs live here */
65399- atomic_long_t gen;
65400+ atomic_long_unchecked_t gen;
65401
65402 /*
65403 * How frequently and how many times transaction
65404diff --git a/include/linux/elf.h b/include/linux/elf.h
65405index 90a4ed0..d652617 100644
65406--- a/include/linux/elf.h
65407+++ b/include/linux/elf.h
65408@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
65409 #define PT_GNU_EH_FRAME 0x6474e550
65410
65411 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
65412+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
65413+
65414+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
65415+
65416+/* Constants for the e_flags field */
65417+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65418+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
65419+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
65420+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
65421+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65422+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65423
65424 /* These constants define the different elf file types */
65425 #define ET_NONE 0
65426@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
65427 #define DT_DEBUG 21
65428 #define DT_TEXTREL 22
65429 #define DT_JMPREL 23
65430+#define DT_FLAGS 30
65431+ #define DF_TEXTREL 0x00000004
65432 #define DT_ENCODING 32
65433 #define OLD_DT_LOOS 0x60000000
65434 #define DT_LOOS 0x6000000d
65435@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
65436 #define PF_W 0x2
65437 #define PF_X 0x1
65438
65439+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
65440+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
65441+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
65442+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
65443+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
65444+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
65445+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
65446+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
65447+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
65448+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
65449+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
65450+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
65451+
65452 typedef struct elf32_phdr{
65453 Elf32_Word p_type;
65454 Elf32_Off p_offset;
65455@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
65456 #define EI_OSABI 7
65457 #define EI_PAD 8
65458
65459+#define EI_PAX 14
65460+
65461 #define ELFMAG0 0x7f /* EI_MAG */
65462 #define ELFMAG1 'E'
65463 #define ELFMAG2 'L'
65464@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
65465 #define elf_phdr elf32_phdr
65466 #define elf_note elf32_note
65467 #define elf_addr_t Elf32_Off
65468+#define elf_dyn Elf32_Dyn
65469
65470 #else
65471
65472@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
65473 #define elf_phdr elf64_phdr
65474 #define elf_note elf64_note
65475 #define elf_addr_t Elf64_Off
65476+#define elf_dyn Elf64_Dyn
65477
65478 #endif
65479
65480diff --git a/include/linux/fs.h b/include/linux/fs.h
65481index 1b9a47a..6fe2934 100644
65482--- a/include/linux/fs.h
65483+++ b/include/linux/fs.h
65484@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
65485 unsigned long, unsigned long);
65486
65487 struct address_space_operations {
65488- int (*writepage)(struct page *page, struct writeback_control *wbc);
65489- int (*readpage)(struct file *, struct page *);
65490- void (*sync_page)(struct page *);
65491+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
65492+ int (* const readpage)(struct file *, struct page *);
65493+ void (* const sync_page)(struct page *);
65494
65495 /* Write back some dirty pages from this mapping. */
65496- int (*writepages)(struct address_space *, struct writeback_control *);
65497+ int (* const writepages)(struct address_space *, struct writeback_control *);
65498
65499 /* Set a page dirty. Return true if this dirtied it */
65500- int (*set_page_dirty)(struct page *page);
65501+ int (* const set_page_dirty)(struct page *page);
65502
65503- int (*readpages)(struct file *filp, struct address_space *mapping,
65504+ int (* const readpages)(struct file *filp, struct address_space *mapping,
65505 struct list_head *pages, unsigned nr_pages);
65506
65507- int (*write_begin)(struct file *, struct address_space *mapping,
65508+ int (* const write_begin)(struct file *, struct address_space *mapping,
65509 loff_t pos, unsigned len, unsigned flags,
65510 struct page **pagep, void **fsdata);
65511- int (*write_end)(struct file *, struct address_space *mapping,
65512+ int (* const write_end)(struct file *, struct address_space *mapping,
65513 loff_t pos, unsigned len, unsigned copied,
65514 struct page *page, void *fsdata);
65515
65516 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
65517- sector_t (*bmap)(struct address_space *, sector_t);
65518- void (*invalidatepage) (struct page *, unsigned long);
65519- int (*releasepage) (struct page *, gfp_t);
65520- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
65521+ sector_t (* const bmap)(struct address_space *, sector_t);
65522+ void (* const invalidatepage) (struct page *, unsigned long);
65523+ int (* const releasepage) (struct page *, gfp_t);
65524+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
65525 loff_t offset, unsigned long nr_segs);
65526- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
65527+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
65528 void **, unsigned long *);
65529 /* migrate the contents of a page to the specified target */
65530- int (*migratepage) (struct address_space *,
65531+ int (* const migratepage) (struct address_space *,
65532 struct page *, struct page *);
65533- int (*launder_page) (struct page *);
65534- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
65535+ int (* const launder_page) (struct page *);
65536+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
65537 unsigned long);
65538- int (*error_remove_page)(struct address_space *, struct page *);
65539+ int (* const error_remove_page)(struct address_space *, struct page *);
65540 };
65541
65542 /*
65543@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
65544 typedef struct files_struct *fl_owner_t;
65545
65546 struct file_lock_operations {
65547- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65548- void (*fl_release_private)(struct file_lock *);
65549+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65550+ void (* const fl_release_private)(struct file_lock *);
65551 };
65552
65553 struct lock_manager_operations {
65554- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
65555- void (*fl_notify)(struct file_lock *); /* unblock callback */
65556- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
65557- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65558- void (*fl_release_private)(struct file_lock *);
65559- void (*fl_break)(struct file_lock *);
65560- int (*fl_mylease)(struct file_lock *, struct file_lock *);
65561- int (*fl_change)(struct file_lock **, int);
65562+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
65563+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
65564+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
65565+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65566+ void (* const fl_release_private)(struct file_lock *);
65567+ void (* const fl_break)(struct file_lock *);
65568+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
65569+ int (* const fl_change)(struct file_lock **, int);
65570 };
65571
65572 struct lock_manager {
65573@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
65574 unsigned int fi_flags; /* Flags as passed from user */
65575 unsigned int fi_extents_mapped; /* Number of mapped extents */
65576 unsigned int fi_extents_max; /* Size of fiemap_extent array */
65577- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
65578+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
65579 * array */
65580 };
65581 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
65582@@ -1512,7 +1512,8 @@ struct file_operations {
65583 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
65584 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
65585 int (*setlease)(struct file *, long, struct file_lock **);
65586-};
65587+} __do_const;
65588+typedef struct file_operations __no_const file_operations_no_const;
65589
65590 struct inode_operations {
65591 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
65592@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
65593 unsigned long, loff_t *);
65594
65595 struct super_operations {
65596- struct inode *(*alloc_inode)(struct super_block *sb);
65597- void (*destroy_inode)(struct inode *);
65598-
65599- void (*dirty_inode) (struct inode *);
65600- int (*write_inode) (struct inode *, int);
65601- void (*drop_inode) (struct inode *);
65602- void (*delete_inode) (struct inode *);
65603- void (*put_super) (struct super_block *);
65604- void (*write_super) (struct super_block *);
65605- int (*sync_fs)(struct super_block *sb, int wait);
65606- int (*freeze_fs) (struct super_block *);
65607- int (*unfreeze_fs) (struct super_block *);
65608- int (*statfs) (struct dentry *, struct kstatfs *);
65609- int (*remount_fs) (struct super_block *, int *, char *);
65610- void (*clear_inode) (struct inode *);
65611- void (*umount_begin) (struct super_block *);
65612-
65613- int (*show_options)(struct seq_file *, struct vfsmount *);
65614- int (*show_stats)(struct seq_file *, struct vfsmount *);
65615+ struct inode *(* const alloc_inode)(struct super_block *sb);
65616+ void (* const destroy_inode)(struct inode *);
65617+
65618+ void (* const dirty_inode) (struct inode *);
65619+ int (* const write_inode) (struct inode *, int);
65620+ void (* const drop_inode) (struct inode *);
65621+ void (* const delete_inode) (struct inode *);
65622+ void (* const put_super) (struct super_block *);
65623+ void (* const write_super) (struct super_block *);
65624+ int (* const sync_fs)(struct super_block *sb, int wait);
65625+ int (* const freeze_fs) (struct super_block *);
65626+ int (* const unfreeze_fs) (struct super_block *);
65627+ int (* const statfs) (struct dentry *, struct kstatfs *);
65628+ int (* const remount_fs) (struct super_block *, int *, char *);
65629+ void (* const clear_inode) (struct inode *);
65630+ void (* const umount_begin) (struct super_block *);
65631+
65632+ int (* const show_options)(struct seq_file *, struct vfsmount *);
65633+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
65634 #ifdef CONFIG_QUOTA
65635- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
65636- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65637+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
65638+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65639 #endif
65640- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65641+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65642 };
65643
65644 /*
65645diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
65646index 78a05bf..2a7d3e1 100644
65647--- a/include/linux/fs_struct.h
65648+++ b/include/linux/fs_struct.h
65649@@ -4,7 +4,7 @@
65650 #include <linux/path.h>
65651
65652 struct fs_struct {
65653- int users;
65654+ atomic_t users;
65655 rwlock_t lock;
65656 int umask;
65657 int in_exec;
65658diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
65659index 7be0c6f..2f63a2b 100644
65660--- a/include/linux/fscache-cache.h
65661+++ b/include/linux/fscache-cache.h
65662@@ -116,7 +116,7 @@ struct fscache_operation {
65663 #endif
65664 };
65665
65666-extern atomic_t fscache_op_debug_id;
65667+extern atomic_unchecked_t fscache_op_debug_id;
65668 extern const struct slow_work_ops fscache_op_slow_work_ops;
65669
65670 extern void fscache_enqueue_operation(struct fscache_operation *);
65671@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
65672 fscache_operation_release_t release)
65673 {
65674 atomic_set(&op->usage, 1);
65675- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
65676+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
65677 op->release = release;
65678 INIT_LIST_HEAD(&op->pend_link);
65679 fscache_set_op_state(op, "Init");
65680diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
65681index 4ec5e67..42f1eb9 100644
65682--- a/include/linux/ftrace_event.h
65683+++ b/include/linux/ftrace_event.h
65684@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
65685 int filter_type);
65686 extern int trace_define_common_fields(struct ftrace_event_call *call);
65687
65688-#define is_signed_type(type) (((type)(-1)) < 0)
65689+#define is_signed_type(type) (((type)(-1)) < (type)1)
65690
65691 int trace_set_clr_event(const char *system, const char *event, int set);
65692
65693diff --git a/include/linux/genhd.h b/include/linux/genhd.h
65694index 297df45..b6a74ff 100644
65695--- a/include/linux/genhd.h
65696+++ b/include/linux/genhd.h
65697@@ -161,7 +161,7 @@ struct gendisk {
65698
65699 struct timer_rand_state *random;
65700
65701- atomic_t sync_io; /* RAID */
65702+ atomic_unchecked_t sync_io; /* RAID */
65703 struct work_struct async_notify;
65704 #ifdef CONFIG_BLK_DEV_INTEGRITY
65705 struct blk_integrity *integrity;
65706diff --git a/include/linux/gracl.h b/include/linux/gracl.h
65707new file mode 100644
65708index 0000000..0dc3943
65709--- /dev/null
65710+++ b/include/linux/gracl.h
65711@@ -0,0 +1,317 @@
65712+#ifndef GR_ACL_H
65713+#define GR_ACL_H
65714+
65715+#include <linux/grdefs.h>
65716+#include <linux/resource.h>
65717+#include <linux/capability.h>
65718+#include <linux/dcache.h>
65719+#include <asm/resource.h>
65720+
65721+/* Major status information */
65722+
65723+#define GR_VERSION "grsecurity 2.2.2"
65724+#define GRSECURITY_VERSION 0x2202
65725+
65726+enum {
65727+ GR_SHUTDOWN = 0,
65728+ GR_ENABLE = 1,
65729+ GR_SPROLE = 2,
65730+ GR_RELOAD = 3,
65731+ GR_SEGVMOD = 4,
65732+ GR_STATUS = 5,
65733+ GR_UNSPROLE = 6,
65734+ GR_PASSSET = 7,
65735+ GR_SPROLEPAM = 8,
65736+};
65737+
65738+/* Password setup definitions
65739+ * kernel/grhash.c */
65740+enum {
65741+ GR_PW_LEN = 128,
65742+ GR_SALT_LEN = 16,
65743+ GR_SHA_LEN = 32,
65744+};
65745+
65746+enum {
65747+ GR_SPROLE_LEN = 64,
65748+};
65749+
65750+enum {
65751+ GR_NO_GLOB = 0,
65752+ GR_REG_GLOB,
65753+ GR_CREATE_GLOB
65754+};
65755+
65756+#define GR_NLIMITS 32
65757+
65758+/* Begin Data Structures */
65759+
65760+struct sprole_pw {
65761+ unsigned char *rolename;
65762+ unsigned char salt[GR_SALT_LEN];
65763+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
65764+};
65765+
65766+struct name_entry {
65767+ __u32 key;
65768+ ino_t inode;
65769+ dev_t device;
65770+ char *name;
65771+ __u16 len;
65772+ __u8 deleted;
65773+ struct name_entry *prev;
65774+ struct name_entry *next;
65775+};
65776+
65777+struct inodev_entry {
65778+ struct name_entry *nentry;
65779+ struct inodev_entry *prev;
65780+ struct inodev_entry *next;
65781+};
65782+
65783+struct acl_role_db {
65784+ struct acl_role_label **r_hash;
65785+ __u32 r_size;
65786+};
65787+
65788+struct inodev_db {
65789+ struct inodev_entry **i_hash;
65790+ __u32 i_size;
65791+};
65792+
65793+struct name_db {
65794+ struct name_entry **n_hash;
65795+ __u32 n_size;
65796+};
65797+
65798+struct crash_uid {
65799+ uid_t uid;
65800+ unsigned long expires;
65801+};
65802+
65803+struct gr_hash_struct {
65804+ void **table;
65805+ void **nametable;
65806+ void *first;
65807+ __u32 table_size;
65808+ __u32 used_size;
65809+ int type;
65810+};
65811+
65812+/* Userspace Grsecurity ACL data structures */
65813+
65814+struct acl_subject_label {
65815+ char *filename;
65816+ ino_t inode;
65817+ dev_t device;
65818+ __u32 mode;
65819+ kernel_cap_t cap_mask;
65820+ kernel_cap_t cap_lower;
65821+ kernel_cap_t cap_invert_audit;
65822+
65823+ struct rlimit res[GR_NLIMITS];
65824+ __u32 resmask;
65825+
65826+ __u8 user_trans_type;
65827+ __u8 group_trans_type;
65828+ uid_t *user_transitions;
65829+ gid_t *group_transitions;
65830+ __u16 user_trans_num;
65831+ __u16 group_trans_num;
65832+
65833+ __u32 sock_families[2];
65834+ __u32 ip_proto[8];
65835+ __u32 ip_type;
65836+ struct acl_ip_label **ips;
65837+ __u32 ip_num;
65838+ __u32 inaddr_any_override;
65839+
65840+ __u32 crashes;
65841+ unsigned long expires;
65842+
65843+ struct acl_subject_label *parent_subject;
65844+ struct gr_hash_struct *hash;
65845+ struct acl_subject_label *prev;
65846+ struct acl_subject_label *next;
65847+
65848+ struct acl_object_label **obj_hash;
65849+ __u32 obj_hash_size;
65850+ __u16 pax_flags;
65851+};
65852+
65853+struct role_allowed_ip {
65854+ __u32 addr;
65855+ __u32 netmask;
65856+
65857+ struct role_allowed_ip *prev;
65858+ struct role_allowed_ip *next;
65859+};
65860+
65861+struct role_transition {
65862+ char *rolename;
65863+
65864+ struct role_transition *prev;
65865+ struct role_transition *next;
65866+};
65867+
65868+struct acl_role_label {
65869+ char *rolename;
65870+ uid_t uidgid;
65871+ __u16 roletype;
65872+
65873+ __u16 auth_attempts;
65874+ unsigned long expires;
65875+
65876+ struct acl_subject_label *root_label;
65877+ struct gr_hash_struct *hash;
65878+
65879+ struct acl_role_label *prev;
65880+ struct acl_role_label *next;
65881+
65882+ struct role_transition *transitions;
65883+ struct role_allowed_ip *allowed_ips;
65884+ uid_t *domain_children;
65885+ __u16 domain_child_num;
65886+
65887+ struct acl_subject_label **subj_hash;
65888+ __u32 subj_hash_size;
65889+};
65890+
65891+struct user_acl_role_db {
65892+ struct acl_role_label **r_table;
65893+ __u32 num_pointers; /* Number of allocations to track */
65894+ __u32 num_roles; /* Number of roles */
65895+ __u32 num_domain_children; /* Number of domain children */
65896+ __u32 num_subjects; /* Number of subjects */
65897+ __u32 num_objects; /* Number of objects */
65898+};
65899+
65900+struct acl_object_label {
65901+ char *filename;
65902+ ino_t inode;
65903+ dev_t device;
65904+ __u32 mode;
65905+
65906+ struct acl_subject_label *nested;
65907+ struct acl_object_label *globbed;
65908+
65909+ /* next two structures not used */
65910+
65911+ struct acl_object_label *prev;
65912+ struct acl_object_label *next;
65913+};
65914+
65915+struct acl_ip_label {
65916+ char *iface;
65917+ __u32 addr;
65918+ __u32 netmask;
65919+ __u16 low, high;
65920+ __u8 mode;
65921+ __u32 type;
65922+ __u32 proto[8];
65923+
65924+ /* next two structures not used */
65925+
65926+ struct acl_ip_label *prev;
65927+ struct acl_ip_label *next;
65928+};
65929+
65930+struct gr_arg {
65931+ struct user_acl_role_db role_db;
65932+ unsigned char pw[GR_PW_LEN];
65933+ unsigned char salt[GR_SALT_LEN];
65934+ unsigned char sum[GR_SHA_LEN];
65935+ unsigned char sp_role[GR_SPROLE_LEN];
65936+ struct sprole_pw *sprole_pws;
65937+ dev_t segv_device;
65938+ ino_t segv_inode;
65939+ uid_t segv_uid;
65940+ __u16 num_sprole_pws;
65941+ __u16 mode;
65942+};
65943+
65944+struct gr_arg_wrapper {
65945+ struct gr_arg *arg;
65946+ __u32 version;
65947+ __u32 size;
65948+};
65949+
65950+struct subject_map {
65951+ struct acl_subject_label *user;
65952+ struct acl_subject_label *kernel;
65953+ struct subject_map *prev;
65954+ struct subject_map *next;
65955+};
65956+
65957+struct acl_subj_map_db {
65958+ struct subject_map **s_hash;
65959+ __u32 s_size;
65960+};
65961+
65962+/* End Data Structures Section */
65963+
65964+/* Hash functions generated by empirical testing by Brad Spengler
65965+ Makes good use of the low bits of the inode. Generally 0-1 times
65966+ in loop for successful match. 0-3 for unsuccessful match.
65967+ Shift/add algorithm with modulus of table size and an XOR*/
65968+
65969+static __inline__ unsigned int
65970+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
65971+{
65972+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
65973+}
65974+
65975+ static __inline__ unsigned int
65976+shash(const struct acl_subject_label *userp, const unsigned int sz)
65977+{
65978+ return ((const unsigned long)userp % sz);
65979+}
65980+
65981+static __inline__ unsigned int
65982+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
65983+{
65984+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
65985+}
65986+
65987+static __inline__ unsigned int
65988+nhash(const char *name, const __u16 len, const unsigned int sz)
65989+{
65990+ return full_name_hash((const unsigned char *)name, len) % sz;
65991+}
65992+
65993+#define FOR_EACH_ROLE_START(role) \
65994+ role = role_list; \
65995+ while (role) {
65996+
65997+#define FOR_EACH_ROLE_END(role) \
65998+ role = role->prev; \
65999+ }
66000+
66001+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66002+ subj = NULL; \
66003+ iter = 0; \
66004+ while (iter < role->subj_hash_size) { \
66005+ if (subj == NULL) \
66006+ subj = role->subj_hash[iter]; \
66007+ if (subj == NULL) { \
66008+ iter++; \
66009+ continue; \
66010+ }
66011+
66012+#define FOR_EACH_SUBJECT_END(subj,iter) \
66013+ subj = subj->next; \
66014+ if (subj == NULL) \
66015+ iter++; \
66016+ }
66017+
66018+
66019+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66020+ subj = role->hash->first; \
66021+ while (subj != NULL) {
66022+
66023+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66024+ subj = subj->next; \
66025+ }
66026+
66027+#endif
66028+
66029diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66030new file mode 100644
66031index 0000000..323ecf2
66032--- /dev/null
66033+++ b/include/linux/gralloc.h
66034@@ -0,0 +1,9 @@
66035+#ifndef __GRALLOC_H
66036+#define __GRALLOC_H
66037+
66038+void acl_free_all(void);
66039+int acl_alloc_stack_init(unsigned long size);
66040+void *acl_alloc(unsigned long len);
66041+void *acl_alloc_num(unsigned long num, unsigned long len);
66042+
66043+#endif
66044diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66045new file mode 100644
66046index 0000000..70d6cd5
66047--- /dev/null
66048+++ b/include/linux/grdefs.h
66049@@ -0,0 +1,140 @@
66050+#ifndef GRDEFS_H
66051+#define GRDEFS_H
66052+
66053+/* Begin grsecurity status declarations */
66054+
66055+enum {
66056+ GR_READY = 0x01,
66057+ GR_STATUS_INIT = 0x00 // disabled state
66058+};
66059+
66060+/* Begin ACL declarations */
66061+
66062+/* Role flags */
66063+
66064+enum {
66065+ GR_ROLE_USER = 0x0001,
66066+ GR_ROLE_GROUP = 0x0002,
66067+ GR_ROLE_DEFAULT = 0x0004,
66068+ GR_ROLE_SPECIAL = 0x0008,
66069+ GR_ROLE_AUTH = 0x0010,
66070+ GR_ROLE_NOPW = 0x0020,
66071+ GR_ROLE_GOD = 0x0040,
66072+ GR_ROLE_LEARN = 0x0080,
66073+ GR_ROLE_TPE = 0x0100,
66074+ GR_ROLE_DOMAIN = 0x0200,
66075+ GR_ROLE_PAM = 0x0400,
66076+ GR_ROLE_PERSIST = 0x800
66077+};
66078+
66079+/* ACL Subject and Object mode flags */
66080+enum {
66081+ GR_DELETED = 0x80000000
66082+};
66083+
66084+/* ACL Object-only mode flags */
66085+enum {
66086+ GR_READ = 0x00000001,
66087+ GR_APPEND = 0x00000002,
66088+ GR_WRITE = 0x00000004,
66089+ GR_EXEC = 0x00000008,
66090+ GR_FIND = 0x00000010,
66091+ GR_INHERIT = 0x00000020,
66092+ GR_SETID = 0x00000040,
66093+ GR_CREATE = 0x00000080,
66094+ GR_DELETE = 0x00000100,
66095+ GR_LINK = 0x00000200,
66096+ GR_AUDIT_READ = 0x00000400,
66097+ GR_AUDIT_APPEND = 0x00000800,
66098+ GR_AUDIT_WRITE = 0x00001000,
66099+ GR_AUDIT_EXEC = 0x00002000,
66100+ GR_AUDIT_FIND = 0x00004000,
66101+ GR_AUDIT_INHERIT= 0x00008000,
66102+ GR_AUDIT_SETID = 0x00010000,
66103+ GR_AUDIT_CREATE = 0x00020000,
66104+ GR_AUDIT_DELETE = 0x00040000,
66105+ GR_AUDIT_LINK = 0x00080000,
66106+ GR_PTRACERD = 0x00100000,
66107+ GR_NOPTRACE = 0x00200000,
66108+ GR_SUPPRESS = 0x00400000,
66109+ GR_NOLEARN = 0x00800000,
66110+ GR_INIT_TRANSFER= 0x01000000
66111+};
66112+
66113+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
66114+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
66115+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
66116+
66117+/* ACL subject-only mode flags */
66118+enum {
66119+ GR_KILL = 0x00000001,
66120+ GR_VIEW = 0x00000002,
66121+ GR_PROTECTED = 0x00000004,
66122+ GR_LEARN = 0x00000008,
66123+ GR_OVERRIDE = 0x00000010,
66124+ /* just a placeholder, this mode is only used in userspace */
66125+ GR_DUMMY = 0x00000020,
66126+ GR_PROTSHM = 0x00000040,
66127+ GR_KILLPROC = 0x00000080,
66128+ GR_KILLIPPROC = 0x00000100,
66129+ /* just a placeholder, this mode is only used in userspace */
66130+ GR_NOTROJAN = 0x00000200,
66131+ GR_PROTPROCFD = 0x00000400,
66132+ GR_PROCACCT = 0x00000800,
66133+ GR_RELAXPTRACE = 0x00001000,
66134+ GR_NESTED = 0x00002000,
66135+ GR_INHERITLEARN = 0x00004000,
66136+ GR_PROCFIND = 0x00008000,
66137+ GR_POVERRIDE = 0x00010000,
66138+ GR_KERNELAUTH = 0x00020000,
66139+ GR_ATSECURE = 0x00040000,
66140+ GR_SHMEXEC = 0x00080000
66141+};
66142+
66143+enum {
66144+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
66145+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
66146+ GR_PAX_ENABLE_MPROTECT = 0x0004,
66147+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
66148+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
66149+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
66150+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
66151+ GR_PAX_DISABLE_MPROTECT = 0x0400,
66152+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
66153+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
66154+};
66155+
66156+enum {
66157+ GR_ID_USER = 0x01,
66158+ GR_ID_GROUP = 0x02,
66159+};
66160+
66161+enum {
66162+ GR_ID_ALLOW = 0x01,
66163+ GR_ID_DENY = 0x02,
66164+};
66165+
66166+#define GR_CRASH_RES 31
66167+#define GR_UIDTABLE_MAX 500
66168+
66169+/* begin resource learning section */
66170+enum {
66171+ GR_RLIM_CPU_BUMP = 60,
66172+ GR_RLIM_FSIZE_BUMP = 50000,
66173+ GR_RLIM_DATA_BUMP = 10000,
66174+ GR_RLIM_STACK_BUMP = 1000,
66175+ GR_RLIM_CORE_BUMP = 10000,
66176+ GR_RLIM_RSS_BUMP = 500000,
66177+ GR_RLIM_NPROC_BUMP = 1,
66178+ GR_RLIM_NOFILE_BUMP = 5,
66179+ GR_RLIM_MEMLOCK_BUMP = 50000,
66180+ GR_RLIM_AS_BUMP = 500000,
66181+ GR_RLIM_LOCKS_BUMP = 2,
66182+ GR_RLIM_SIGPENDING_BUMP = 5,
66183+ GR_RLIM_MSGQUEUE_BUMP = 10000,
66184+ GR_RLIM_NICE_BUMP = 1,
66185+ GR_RLIM_RTPRIO_BUMP = 1,
66186+ GR_RLIM_RTTIME_BUMP = 1000000
66187+};
66188+
66189+#endif
66190diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
66191new file mode 100644
66192index 0000000..e5817d7
66193--- /dev/null
66194+++ b/include/linux/grinternal.h
66195@@ -0,0 +1,218 @@
66196+#ifndef __GRINTERNAL_H
66197+#define __GRINTERNAL_H
66198+
66199+#ifdef CONFIG_GRKERNSEC
66200+
66201+#include <linux/fs.h>
66202+#include <linux/mnt_namespace.h>
66203+#include <linux/nsproxy.h>
66204+#include <linux/gracl.h>
66205+#include <linux/grdefs.h>
66206+#include <linux/grmsg.h>
66207+
66208+void gr_add_learn_entry(const char *fmt, ...)
66209+ __attribute__ ((format (printf, 1, 2)));
66210+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
66211+ const struct vfsmount *mnt);
66212+__u32 gr_check_create(const struct dentry *new_dentry,
66213+ const struct dentry *parent,
66214+ const struct vfsmount *mnt, const __u32 mode);
66215+int gr_check_protected_task(const struct task_struct *task);
66216+__u32 to_gr_audit(const __u32 reqmode);
66217+int gr_set_acls(const int type);
66218+int gr_apply_subject_to_task(struct task_struct *task);
66219+int gr_acl_is_enabled(void);
66220+char gr_roletype_to_char(void);
66221+
66222+void gr_handle_alertkill(struct task_struct *task);
66223+char *gr_to_filename(const struct dentry *dentry,
66224+ const struct vfsmount *mnt);
66225+char *gr_to_filename1(const struct dentry *dentry,
66226+ const struct vfsmount *mnt);
66227+char *gr_to_filename2(const struct dentry *dentry,
66228+ const struct vfsmount *mnt);
66229+char *gr_to_filename3(const struct dentry *dentry,
66230+ const struct vfsmount *mnt);
66231+
66232+extern int grsec_enable_harden_ptrace;
66233+extern int grsec_enable_link;
66234+extern int grsec_enable_fifo;
66235+extern int grsec_enable_shm;
66236+extern int grsec_enable_execlog;
66237+extern int grsec_enable_signal;
66238+extern int grsec_enable_audit_ptrace;
66239+extern int grsec_enable_forkfail;
66240+extern int grsec_enable_time;
66241+extern int grsec_enable_rofs;
66242+extern int grsec_enable_chroot_shmat;
66243+extern int grsec_enable_chroot_mount;
66244+extern int grsec_enable_chroot_double;
66245+extern int grsec_enable_chroot_pivot;
66246+extern int grsec_enable_chroot_chdir;
66247+extern int grsec_enable_chroot_chmod;
66248+extern int grsec_enable_chroot_mknod;
66249+extern int grsec_enable_chroot_fchdir;
66250+extern int grsec_enable_chroot_nice;
66251+extern int grsec_enable_chroot_execlog;
66252+extern int grsec_enable_chroot_caps;
66253+extern int grsec_enable_chroot_sysctl;
66254+extern int grsec_enable_chroot_unix;
66255+extern int grsec_enable_tpe;
66256+extern int grsec_tpe_gid;
66257+extern int grsec_enable_tpe_all;
66258+extern int grsec_enable_tpe_invert;
66259+extern int grsec_enable_socket_all;
66260+extern int grsec_socket_all_gid;
66261+extern int grsec_enable_socket_client;
66262+extern int grsec_socket_client_gid;
66263+extern int grsec_enable_socket_server;
66264+extern int grsec_socket_server_gid;
66265+extern int grsec_audit_gid;
66266+extern int grsec_enable_group;
66267+extern int grsec_enable_audit_textrel;
66268+extern int grsec_enable_log_rwxmaps;
66269+extern int grsec_enable_mount;
66270+extern int grsec_enable_chdir;
66271+extern int grsec_resource_logging;
66272+extern int grsec_enable_blackhole;
66273+extern int grsec_lastack_retries;
66274+extern int grsec_enable_brute;
66275+extern int grsec_lock;
66276+
66277+extern spinlock_t grsec_alert_lock;
66278+extern unsigned long grsec_alert_wtime;
66279+extern unsigned long grsec_alert_fyet;
66280+
66281+extern spinlock_t grsec_audit_lock;
66282+
66283+extern rwlock_t grsec_exec_file_lock;
66284+
66285+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
66286+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
66287+ (tsk)->exec_file->f_vfsmnt) : "/")
66288+
66289+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
66290+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
66291+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66292+
66293+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
66294+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
66295+ (tsk)->exec_file->f_vfsmnt) : "/")
66296+
66297+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
66298+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
66299+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66300+
66301+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
66302+
66303+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
66304+
66305+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
66306+ (task)->pid, (cred)->uid, \
66307+ (cred)->euid, (cred)->gid, (cred)->egid, \
66308+ gr_parent_task_fullpath(task), \
66309+ (task)->real_parent->comm, (task)->real_parent->pid, \
66310+ (pcred)->uid, (pcred)->euid, \
66311+ (pcred)->gid, (pcred)->egid
66312+
66313+#define GR_CHROOT_CAPS {{ \
66314+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
66315+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
66316+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
66317+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
66318+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
66319+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
66320+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
66321+
66322+#define security_learn(normal_msg,args...) \
66323+({ \
66324+ read_lock(&grsec_exec_file_lock); \
66325+ gr_add_learn_entry(normal_msg "\n", ## args); \
66326+ read_unlock(&grsec_exec_file_lock); \
66327+})
66328+
66329+enum {
66330+ GR_DO_AUDIT,
66331+ GR_DONT_AUDIT,
66332+ GR_DONT_AUDIT_GOOD
66333+};
66334+
66335+enum {
66336+ GR_TTYSNIFF,
66337+ GR_RBAC,
66338+ GR_RBAC_STR,
66339+ GR_STR_RBAC,
66340+ GR_RBAC_MODE2,
66341+ GR_RBAC_MODE3,
66342+ GR_FILENAME,
66343+ GR_SYSCTL_HIDDEN,
66344+ GR_NOARGS,
66345+ GR_ONE_INT,
66346+ GR_ONE_INT_TWO_STR,
66347+ GR_ONE_STR,
66348+ GR_STR_INT,
66349+ GR_TWO_STR_INT,
66350+ GR_TWO_INT,
66351+ GR_TWO_U64,
66352+ GR_THREE_INT,
66353+ GR_FIVE_INT_TWO_STR,
66354+ GR_TWO_STR,
66355+ GR_THREE_STR,
66356+ GR_FOUR_STR,
66357+ GR_STR_FILENAME,
66358+ GR_FILENAME_STR,
66359+ GR_FILENAME_TWO_INT,
66360+ GR_FILENAME_TWO_INT_STR,
66361+ GR_TEXTREL,
66362+ GR_PTRACE,
66363+ GR_RESOURCE,
66364+ GR_CAP,
66365+ GR_SIG,
66366+ GR_SIG2,
66367+ GR_CRASH1,
66368+ GR_CRASH2,
66369+ GR_PSACCT,
66370+ GR_RWXMAP
66371+};
66372+
66373+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
66374+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
66375+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
66376+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
66377+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
66378+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
66379+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
66380+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
66381+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
66382+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
66383+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
66384+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
66385+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
66386+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
66387+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
66388+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
66389+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
66390+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
66391+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
66392+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
66393+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
66394+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
66395+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
66396+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
66397+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
66398+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
66399+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
66400+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
66401+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
66402+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
66403+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
66404+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
66405+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
66406+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
66407+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
66408+
66409+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
66410+
66411+#endif
66412+
66413+#endif
66414diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
66415new file mode 100644
66416index 0000000..9d5fd4a
66417--- /dev/null
66418+++ b/include/linux/grmsg.h
66419@@ -0,0 +1,108 @@
66420+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
66421+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
66422+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
66423+#define GR_STOPMOD_MSG "denied modification of module state by "
66424+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
66425+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
66426+#define GR_IOPERM_MSG "denied use of ioperm() by "
66427+#define GR_IOPL_MSG "denied use of iopl() by "
66428+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
66429+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
66430+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
66431+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
66432+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
66433+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
66434+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
66435+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
66436+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
66437+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
66438+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
66439+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
66440+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
66441+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
66442+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
66443+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
66444+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
66445+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
66446+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
66447+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
66448+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
66449+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
66450+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
66451+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
66452+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
66453+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
66454+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
66455+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
66456+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
66457+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
66458+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
66459+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
66460+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
66461+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
66462+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
66463+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
66464+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
66465+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
66466+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
66467+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
66468+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
66469+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
66470+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
66471+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
66472+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
66473+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
66474+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
66475+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
66476+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
66477+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
66478+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
66479+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
66480+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
66481+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
66482+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
66483+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
66484+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
66485+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
66486+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
66487+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
66488+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
66489+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
66490+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
66491+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
66492+#define GR_FAILFORK_MSG "failed fork with errno %s by "
66493+#define GR_NICE_CHROOT_MSG "denied priority change by "
66494+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
66495+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
66496+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
66497+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
66498+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
66499+#define GR_TIME_MSG "time set by "
66500+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
66501+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
66502+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
66503+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
66504+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
66505+#define GR_BIND_MSG "denied bind() by "
66506+#define GR_CONNECT_MSG "denied connect() by "
66507+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
66508+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
66509+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
66510+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
66511+#define GR_CAP_ACL_MSG "use of %s denied for "
66512+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
66513+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
66514+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
66515+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
66516+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
66517+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
66518+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
66519+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
66520+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
66521+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
66522+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
66523+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
66524+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
66525+#define GR_VM86_MSG "denied use of vm86 by "
66526+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
66527+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
66528diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
66529new file mode 100644
66530index 0000000..24676f4
66531--- /dev/null
66532+++ b/include/linux/grsecurity.h
66533@@ -0,0 +1,218 @@
66534+#ifndef GR_SECURITY_H
66535+#define GR_SECURITY_H
66536+#include <linux/fs.h>
66537+#include <linux/fs_struct.h>
66538+#include <linux/binfmts.h>
66539+#include <linux/gracl.h>
66540+#include <linux/compat.h>
66541+
66542+/* notify of brain-dead configs */
66543+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66544+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
66545+#endif
66546+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
66547+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
66548+#endif
66549+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66550+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66551+#endif
66552+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66553+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66554+#endif
66555+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
66556+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
66557+#endif
66558+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
66559+#error "CONFIG_PAX enabled, but no PaX options are enabled."
66560+#endif
66561+
66562+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
66563+void gr_handle_brute_check(void);
66564+void gr_handle_kernel_exploit(void);
66565+int gr_process_user_ban(void);
66566+
66567+char gr_roletype_to_char(void);
66568+
66569+int gr_acl_enable_at_secure(void);
66570+
66571+int gr_check_user_change(int real, int effective, int fs);
66572+int gr_check_group_change(int real, int effective, int fs);
66573+
66574+void gr_del_task_from_ip_table(struct task_struct *p);
66575+
66576+int gr_pid_is_chrooted(struct task_struct *p);
66577+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
66578+int gr_handle_chroot_nice(void);
66579+int gr_handle_chroot_sysctl(const int op);
66580+int gr_handle_chroot_setpriority(struct task_struct *p,
66581+ const int niceval);
66582+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
66583+int gr_handle_chroot_chroot(const struct dentry *dentry,
66584+ const struct vfsmount *mnt);
66585+void gr_handle_chroot_chdir(struct path *path);
66586+int gr_handle_chroot_chmod(const struct dentry *dentry,
66587+ const struct vfsmount *mnt, const int mode);
66588+int gr_handle_chroot_mknod(const struct dentry *dentry,
66589+ const struct vfsmount *mnt, const int mode);
66590+int gr_handle_chroot_mount(const struct dentry *dentry,
66591+ const struct vfsmount *mnt,
66592+ const char *dev_name);
66593+int gr_handle_chroot_pivot(void);
66594+int gr_handle_chroot_unix(const pid_t pid);
66595+
66596+int gr_handle_rawio(const struct inode *inode);
66597+
66598+void gr_handle_ioperm(void);
66599+void gr_handle_iopl(void);
66600+
66601+int gr_tpe_allow(const struct file *file);
66602+
66603+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
66604+void gr_clear_chroot_entries(struct task_struct *task);
66605+
66606+void gr_log_forkfail(const int retval);
66607+void gr_log_timechange(void);
66608+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
66609+void gr_log_chdir(const struct dentry *dentry,
66610+ const struct vfsmount *mnt);
66611+void gr_log_chroot_exec(const struct dentry *dentry,
66612+ const struct vfsmount *mnt);
66613+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
66614+#ifdef CONFIG_COMPAT
66615+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
66616+#endif
66617+void gr_log_remount(const char *devname, const int retval);
66618+void gr_log_unmount(const char *devname, const int retval);
66619+void gr_log_mount(const char *from, const char *to, const int retval);
66620+void gr_log_textrel(struct vm_area_struct *vma);
66621+void gr_log_rwxmmap(struct file *file);
66622+void gr_log_rwxmprotect(struct file *file);
66623+
66624+int gr_handle_follow_link(const struct inode *parent,
66625+ const struct inode *inode,
66626+ const struct dentry *dentry,
66627+ const struct vfsmount *mnt);
66628+int gr_handle_fifo(const struct dentry *dentry,
66629+ const struct vfsmount *mnt,
66630+ const struct dentry *dir, const int flag,
66631+ const int acc_mode);
66632+int gr_handle_hardlink(const struct dentry *dentry,
66633+ const struct vfsmount *mnt,
66634+ struct inode *inode,
66635+ const int mode, const char *to);
66636+
66637+int gr_is_capable(const int cap);
66638+int gr_is_capable_nolog(const int cap);
66639+void gr_learn_resource(const struct task_struct *task, const int limit,
66640+ const unsigned long wanted, const int gt);
66641+void gr_copy_label(struct task_struct *tsk);
66642+void gr_handle_crash(struct task_struct *task, const int sig);
66643+int gr_handle_signal(const struct task_struct *p, const int sig);
66644+int gr_check_crash_uid(const uid_t uid);
66645+int gr_check_protected_task(const struct task_struct *task);
66646+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
66647+int gr_acl_handle_mmap(const struct file *file,
66648+ const unsigned long prot);
66649+int gr_acl_handle_mprotect(const struct file *file,
66650+ const unsigned long prot);
66651+int gr_check_hidden_task(const struct task_struct *tsk);
66652+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
66653+ const struct vfsmount *mnt);
66654+__u32 gr_acl_handle_utime(const struct dentry *dentry,
66655+ const struct vfsmount *mnt);
66656+__u32 gr_acl_handle_access(const struct dentry *dentry,
66657+ const struct vfsmount *mnt, const int fmode);
66658+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
66659+ const struct vfsmount *mnt, mode_t mode);
66660+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
66661+ const struct vfsmount *mnt, mode_t mode);
66662+__u32 gr_acl_handle_chown(const struct dentry *dentry,
66663+ const struct vfsmount *mnt);
66664+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
66665+ const struct vfsmount *mnt);
66666+int gr_handle_ptrace(struct task_struct *task, const long request);
66667+int gr_handle_proc_ptrace(struct task_struct *task);
66668+__u32 gr_acl_handle_execve(const struct dentry *dentry,
66669+ const struct vfsmount *mnt);
66670+int gr_check_crash_exec(const struct file *filp);
66671+int gr_acl_is_enabled(void);
66672+void gr_set_kernel_label(struct task_struct *task);
66673+void gr_set_role_label(struct task_struct *task, const uid_t uid,
66674+ const gid_t gid);
66675+int gr_set_proc_label(const struct dentry *dentry,
66676+ const struct vfsmount *mnt,
66677+ const int unsafe_share);
66678+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
66679+ const struct vfsmount *mnt);
66680+__u32 gr_acl_handle_open(const struct dentry *dentry,
66681+ const struct vfsmount *mnt, int acc_mode);
66682+__u32 gr_acl_handle_creat(const struct dentry *dentry,
66683+ const struct dentry *p_dentry,
66684+ const struct vfsmount *p_mnt,
66685+ int open_flags, int acc_mode, const int imode);
66686+void gr_handle_create(const struct dentry *dentry,
66687+ const struct vfsmount *mnt);
66688+void gr_handle_proc_create(const struct dentry *dentry,
66689+ const struct inode *inode);
66690+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
66691+ const struct dentry *parent_dentry,
66692+ const struct vfsmount *parent_mnt,
66693+ const int mode);
66694+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
66695+ const struct dentry *parent_dentry,
66696+ const struct vfsmount *parent_mnt);
66697+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
66698+ const struct vfsmount *mnt);
66699+void gr_handle_delete(const ino_t ino, const dev_t dev);
66700+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
66701+ const struct vfsmount *mnt);
66702+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
66703+ const struct dentry *parent_dentry,
66704+ const struct vfsmount *parent_mnt,
66705+ const char *from);
66706+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
66707+ const struct dentry *parent_dentry,
66708+ const struct vfsmount *parent_mnt,
66709+ const struct dentry *old_dentry,
66710+ const struct vfsmount *old_mnt, const char *to);
66711+int gr_acl_handle_rename(struct dentry *new_dentry,
66712+ struct dentry *parent_dentry,
66713+ const struct vfsmount *parent_mnt,
66714+ struct dentry *old_dentry,
66715+ struct inode *old_parent_inode,
66716+ struct vfsmount *old_mnt, const char *newname);
66717+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
66718+ struct dentry *old_dentry,
66719+ struct dentry *new_dentry,
66720+ struct vfsmount *mnt, const __u8 replace);
66721+__u32 gr_check_link(const struct dentry *new_dentry,
66722+ const struct dentry *parent_dentry,
66723+ const struct vfsmount *parent_mnt,
66724+ const struct dentry *old_dentry,
66725+ const struct vfsmount *old_mnt);
66726+int gr_acl_handle_filldir(const struct file *file, const char *name,
66727+ const unsigned int namelen, const ino_t ino);
66728+
66729+__u32 gr_acl_handle_unix(const struct dentry *dentry,
66730+ const struct vfsmount *mnt);
66731+void gr_acl_handle_exit(void);
66732+void gr_acl_handle_psacct(struct task_struct *task, const long code);
66733+int gr_acl_handle_procpidmem(const struct task_struct *task);
66734+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
66735+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
66736+void gr_audit_ptrace(struct task_struct *task);
66737+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
66738+
66739+#ifdef CONFIG_GRKERNSEC
66740+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
66741+void gr_handle_vm86(void);
66742+void gr_handle_mem_readwrite(u64 from, u64 to);
66743+
66744+extern int grsec_enable_dmesg;
66745+extern int grsec_disable_privio;
66746+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66747+extern int grsec_enable_chroot_findtask;
66748+#endif
66749+#endif
66750+
66751+#endif
66752diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
66753index 6a87154..a3ce57b 100644
66754--- a/include/linux/hdpu_features.h
66755+++ b/include/linux/hdpu_features.h
66756@@ -3,7 +3,7 @@
66757 struct cpustate_t {
66758 spinlock_t lock;
66759 int excl;
66760- int open_count;
66761+ atomic_t open_count;
66762 unsigned char cached_val;
66763 int inited;
66764 unsigned long *set_addr;
66765diff --git a/include/linux/highmem.h b/include/linux/highmem.h
66766index 211ff44..00ab6d7 100644
66767--- a/include/linux/highmem.h
66768+++ b/include/linux/highmem.h
66769@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
66770 kunmap_atomic(kaddr, KM_USER0);
66771 }
66772
66773+static inline void sanitize_highpage(struct page *page)
66774+{
66775+ void *kaddr;
66776+ unsigned long flags;
66777+
66778+ local_irq_save(flags);
66779+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
66780+ clear_page(kaddr);
66781+ kunmap_atomic(kaddr, KM_CLEARPAGE);
66782+ local_irq_restore(flags);
66783+}
66784+
66785 static inline void zero_user_segments(struct page *page,
66786 unsigned start1, unsigned end1,
66787 unsigned start2, unsigned end2)
66788diff --git a/include/linux/i2c.h b/include/linux/i2c.h
66789index 7b40cda..24eb44e 100644
66790--- a/include/linux/i2c.h
66791+++ b/include/linux/i2c.h
66792@@ -325,6 +325,7 @@ struct i2c_algorithm {
66793 /* To determine what the adapter supports */
66794 u32 (*functionality) (struct i2c_adapter *);
66795 };
66796+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
66797
66798 /*
66799 * i2c_adapter is the structure used to identify a physical i2c bus along
66800diff --git a/include/linux/i2o.h b/include/linux/i2o.h
66801index 4c4e57d..f3c5303 100644
66802--- a/include/linux/i2o.h
66803+++ b/include/linux/i2o.h
66804@@ -564,7 +564,7 @@ struct i2o_controller {
66805 struct i2o_device *exec; /* Executive */
66806 #if BITS_PER_LONG == 64
66807 spinlock_t context_list_lock; /* lock for context_list */
66808- atomic_t context_list_counter; /* needed for unique contexts */
66809+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
66810 struct list_head context_list; /* list of context id's
66811 and pointers */
66812 #endif
66813diff --git a/include/linux/init_task.h b/include/linux/init_task.h
66814index 21a6f5d..dc42eab 100644
66815--- a/include/linux/init_task.h
66816+++ b/include/linux/init_task.h
66817@@ -83,6 +83,12 @@ extern struct group_info init_groups;
66818 #define INIT_IDS
66819 #endif
66820
66821+#ifdef CONFIG_X86
66822+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
66823+#else
66824+#define INIT_TASK_THREAD_INFO
66825+#endif
66826+
66827 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
66828 /*
66829 * Because of the reduced scope of CAP_SETPCAP when filesystem
66830@@ -156,6 +162,7 @@ extern struct cred init_cred;
66831 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
66832 .comm = "swapper", \
66833 .thread = INIT_THREAD, \
66834+ INIT_TASK_THREAD_INFO \
66835 .fs = &init_fs, \
66836 .files = &init_files, \
66837 .signal = &init_signals, \
66838diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
66839index 4f0a72a..a849599 100644
66840--- a/include/linux/intel-iommu.h
66841+++ b/include/linux/intel-iommu.h
66842@@ -296,7 +296,7 @@ struct iommu_flush {
66843 u8 fm, u64 type);
66844 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
66845 unsigned int size_order, u64 type);
66846-};
66847+} __no_const;
66848
66849 enum {
66850 SR_DMAR_FECTL_REG,
66851diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
66852index c739150..be577b5 100644
66853--- a/include/linux/interrupt.h
66854+++ b/include/linux/interrupt.h
66855@@ -369,7 +369,7 @@ enum
66856 /* map softirq index to softirq name. update 'softirq_to_name' in
66857 * kernel/softirq.c when adding a new softirq.
66858 */
66859-extern char *softirq_to_name[NR_SOFTIRQS];
66860+extern const char * const softirq_to_name[NR_SOFTIRQS];
66861
66862 /* softirq mask and active fields moved to irq_cpustat_t in
66863 * asm/hardirq.h to get better cache usage. KAO
66864@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
66865
66866 struct softirq_action
66867 {
66868- void (*action)(struct softirq_action *);
66869+ void (*action)(void);
66870 };
66871
66872 asmlinkage void do_softirq(void);
66873 asmlinkage void __do_softirq(void);
66874-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
66875+extern void open_softirq(int nr, void (*action)(void));
66876 extern void softirq_init(void);
66877 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
66878 extern void raise_softirq_irqoff(unsigned int nr);
66879diff --git a/include/linux/irq.h b/include/linux/irq.h
66880index 9e5f45a..025865b 100644
66881--- a/include/linux/irq.h
66882+++ b/include/linux/irq.h
66883@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
66884 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
66885 bool boot)
66886 {
66887+#ifdef CONFIG_CPUMASK_OFFSTACK
66888 gfp_t gfp = GFP_ATOMIC;
66889
66890 if (boot)
66891 gfp = GFP_NOWAIT;
66892
66893-#ifdef CONFIG_CPUMASK_OFFSTACK
66894 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
66895 return false;
66896
66897diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
66898index 7922742..27306a2 100644
66899--- a/include/linux/kallsyms.h
66900+++ b/include/linux/kallsyms.h
66901@@ -15,7 +15,8 @@
66902
66903 struct module;
66904
66905-#ifdef CONFIG_KALLSYMS
66906+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
66907+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66908 /* Lookup the address for a symbol. Returns 0 if not found. */
66909 unsigned long kallsyms_lookup_name(const char *name);
66910
66911@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
66912 /* Stupid that this does nothing, but I didn't create this mess. */
66913 #define __print_symbol(fmt, addr)
66914 #endif /*CONFIG_KALLSYMS*/
66915+#else /* when included by kallsyms.c, vsnprintf.c, or
66916+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
66917+extern void __print_symbol(const char *fmt, unsigned long address);
66918+extern int sprint_symbol(char *buffer, unsigned long address);
66919+const char *kallsyms_lookup(unsigned long addr,
66920+ unsigned long *symbolsize,
66921+ unsigned long *offset,
66922+ char **modname, char *namebuf);
66923+#endif
66924
66925 /* This macro allows us to keep printk typechecking */
66926 static void __check_printsym_format(const char *fmt, ...)
66927diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
66928index 6adcc29..13369e8 100644
66929--- a/include/linux/kgdb.h
66930+++ b/include/linux/kgdb.h
66931@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
66932
66933 extern int kgdb_connected;
66934
66935-extern atomic_t kgdb_setting_breakpoint;
66936-extern atomic_t kgdb_cpu_doing_single_step;
66937+extern atomic_unchecked_t kgdb_setting_breakpoint;
66938+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
66939
66940 extern struct task_struct *kgdb_usethread;
66941 extern struct task_struct *kgdb_contthread;
66942@@ -235,7 +235,7 @@ struct kgdb_arch {
66943 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
66944 void (*remove_all_hw_break)(void);
66945 void (*correct_hw_break)(void);
66946-};
66947+} __do_const;
66948
66949 /**
66950 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
66951@@ -257,14 +257,14 @@ struct kgdb_io {
66952 int (*init) (void);
66953 void (*pre_exception) (void);
66954 void (*post_exception) (void);
66955-};
66956+} __do_const;
66957
66958-extern struct kgdb_arch arch_kgdb_ops;
66959+extern const struct kgdb_arch arch_kgdb_ops;
66960
66961 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
66962
66963-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
66964-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
66965+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
66966+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
66967
66968 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
66969 extern int kgdb_mem2hex(char *mem, char *buf, int count);
66970diff --git a/include/linux/kmod.h b/include/linux/kmod.h
66971index 384ca8b..83dd97d 100644
66972--- a/include/linux/kmod.h
66973+++ b/include/linux/kmod.h
66974@@ -31,6 +31,8 @@
66975 * usually useless though. */
66976 extern int __request_module(bool wait, const char *name, ...) \
66977 __attribute__((format(printf, 2, 3)));
66978+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
66979+ __attribute__((format(printf, 3, 4)));
66980 #define request_module(mod...) __request_module(true, mod)
66981 #define request_module_nowait(mod...) __request_module(false, mod)
66982 #define try_then_request_module(x, mod...) \
66983diff --git a/include/linux/kobject.h b/include/linux/kobject.h
66984index 58ae8e0..3950d3c 100644
66985--- a/include/linux/kobject.h
66986+++ b/include/linux/kobject.h
66987@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
66988
66989 struct kobj_type {
66990 void (*release)(struct kobject *kobj);
66991- struct sysfs_ops *sysfs_ops;
66992+ const struct sysfs_ops *sysfs_ops;
66993 struct attribute **default_attrs;
66994 };
66995
66996@@ -118,9 +118,9 @@ struct kobj_uevent_env {
66997 };
66998
66999 struct kset_uevent_ops {
67000- int (*filter)(struct kset *kset, struct kobject *kobj);
67001- const char *(*name)(struct kset *kset, struct kobject *kobj);
67002- int (*uevent)(struct kset *kset, struct kobject *kobj,
67003+ int (* const filter)(struct kset *kset, struct kobject *kobj);
67004+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
67005+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
67006 struct kobj_uevent_env *env);
67007 };
67008
67009@@ -132,7 +132,7 @@ struct kobj_attribute {
67010 const char *buf, size_t count);
67011 };
67012
67013-extern struct sysfs_ops kobj_sysfs_ops;
67014+extern const struct sysfs_ops kobj_sysfs_ops;
67015
67016 /**
67017 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67018@@ -155,14 +155,14 @@ struct kset {
67019 struct list_head list;
67020 spinlock_t list_lock;
67021 struct kobject kobj;
67022- struct kset_uevent_ops *uevent_ops;
67023+ const struct kset_uevent_ops *uevent_ops;
67024 };
67025
67026 extern void kset_init(struct kset *kset);
67027 extern int __must_check kset_register(struct kset *kset);
67028 extern void kset_unregister(struct kset *kset);
67029 extern struct kset * __must_check kset_create_and_add(const char *name,
67030- struct kset_uevent_ops *u,
67031+ const struct kset_uevent_ops *u,
67032 struct kobject *parent_kobj);
67033
67034 static inline struct kset *to_kset(struct kobject *kobj)
67035diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67036index c728a50..752d821 100644
67037--- a/include/linux/kvm_host.h
67038+++ b/include/linux/kvm_host.h
67039@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67040 void vcpu_load(struct kvm_vcpu *vcpu);
67041 void vcpu_put(struct kvm_vcpu *vcpu);
67042
67043-int kvm_init(void *opaque, unsigned int vcpu_size,
67044+int kvm_init(const void *opaque, unsigned int vcpu_size,
67045 struct module *module);
67046 void kvm_exit(void);
67047
67048@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67049 struct kvm_guest_debug *dbg);
67050 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67051
67052-int kvm_arch_init(void *opaque);
67053+int kvm_arch_init(const void *opaque);
67054 void kvm_arch_exit(void);
67055
67056 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67057diff --git a/include/linux/libata.h b/include/linux/libata.h
67058index a069916..223edde 100644
67059--- a/include/linux/libata.h
67060+++ b/include/linux/libata.h
67061@@ -525,11 +525,11 @@ struct ata_ioports {
67062
67063 struct ata_host {
67064 spinlock_t lock;
67065- struct device *dev;
67066+ struct device *dev;
67067 void __iomem * const *iomap;
67068 unsigned int n_ports;
67069 void *private_data;
67070- struct ata_port_operations *ops;
67071+ const struct ata_port_operations *ops;
67072 unsigned long flags;
67073 #ifdef CONFIG_ATA_ACPI
67074 acpi_handle acpi_handle;
67075@@ -710,7 +710,7 @@ struct ata_link {
67076
67077 struct ata_port {
67078 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
67079- struct ata_port_operations *ops;
67080+ const struct ata_port_operations *ops;
67081 spinlock_t *lock;
67082 /* Flags owned by the EH context. Only EH should touch these once the
67083 port is active */
67084@@ -884,7 +884,7 @@ struct ata_port_operations {
67085 * fields must be pointers.
67086 */
67087 const struct ata_port_operations *inherits;
67088-};
67089+} __do_const;
67090
67091 struct ata_port_info {
67092 unsigned long flags;
67093@@ -892,7 +892,7 @@ struct ata_port_info {
67094 unsigned long pio_mask;
67095 unsigned long mwdma_mask;
67096 unsigned long udma_mask;
67097- struct ata_port_operations *port_ops;
67098+ const struct ata_port_operations *port_ops;
67099 void *private_data;
67100 };
67101
67102@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
67103 extern const unsigned long sata_deb_timing_hotplug[];
67104 extern const unsigned long sata_deb_timing_long[];
67105
67106-extern struct ata_port_operations ata_dummy_port_ops;
67107+extern const struct ata_port_operations ata_dummy_port_ops;
67108 extern const struct ata_port_info ata_dummy_port_info;
67109
67110 static inline const unsigned long *
67111@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
67112 struct scsi_host_template *sht);
67113 extern void ata_host_detach(struct ata_host *host);
67114 extern void ata_host_init(struct ata_host *, struct device *,
67115- unsigned long, struct ata_port_operations *);
67116+ unsigned long, const struct ata_port_operations *);
67117 extern int ata_scsi_detect(struct scsi_host_template *sht);
67118 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
67119 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
67120diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
67121index fbc48f8..0886e57 100644
67122--- a/include/linux/lockd/bind.h
67123+++ b/include/linux/lockd/bind.h
67124@@ -23,13 +23,13 @@ struct svc_rqst;
67125 * This is the set of functions for lockd->nfsd communication
67126 */
67127 struct nlmsvc_binding {
67128- __be32 (*fopen)(struct svc_rqst *,
67129+ __be32 (* const fopen)(struct svc_rqst *,
67130 struct nfs_fh *,
67131 struct file **);
67132- void (*fclose)(struct file *);
67133+ void (* const fclose)(struct file *);
67134 };
67135
67136-extern struct nlmsvc_binding * nlmsvc_ops;
67137+extern const struct nlmsvc_binding * nlmsvc_ops;
67138
67139 /*
67140 * Similar to nfs_client_initdata, but without the NFS-specific
67141diff --git a/include/linux/mca.h b/include/linux/mca.h
67142index 3797270..7765ede 100644
67143--- a/include/linux/mca.h
67144+++ b/include/linux/mca.h
67145@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
67146 int region);
67147 void * (*mca_transform_memory)(struct mca_device *,
67148 void *memory);
67149-};
67150+} __no_const;
67151
67152 struct mca_bus {
67153 u64 default_dma_mask;
67154diff --git a/include/linux/memory.h b/include/linux/memory.h
67155index 37fa19b..b597c85 100644
67156--- a/include/linux/memory.h
67157+++ b/include/linux/memory.h
67158@@ -108,7 +108,7 @@ struct memory_accessor {
67159 size_t count);
67160 ssize_t (*write)(struct memory_accessor *, const char *buf,
67161 off_t offset, size_t count);
67162-};
67163+} __no_const;
67164
67165 /*
67166 * Kernel text modification mutex, used for code patching. Users of this lock
67167diff --git a/include/linux/mm.h b/include/linux/mm.h
67168index 11e5be6..1ff2423 100644
67169--- a/include/linux/mm.h
67170+++ b/include/linux/mm.h
67171@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
67172
67173 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
67174 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
67175+
67176+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67177+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
67178+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
67179+#else
67180 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
67181+#endif
67182+
67183 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
67184 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
67185
67186@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
67187 int set_page_dirty_lock(struct page *page);
67188 int clear_page_dirty_for_io(struct page *page);
67189
67190-/* Is the vma a continuation of the stack vma above it? */
67191-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
67192-{
67193- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
67194-}
67195-
67196 extern unsigned long move_page_tables(struct vm_area_struct *vma,
67197 unsigned long old_addr, struct vm_area_struct *new_vma,
67198 unsigned long new_addr, unsigned long len);
67199@@ -890,6 +891,8 @@ struct shrinker {
67200 extern void register_shrinker(struct shrinker *);
67201 extern void unregister_shrinker(struct shrinker *);
67202
67203+pgprot_t vm_get_page_prot(unsigned long vm_flags);
67204+
67205 int vma_wants_writenotify(struct vm_area_struct *vma);
67206
67207 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
67208@@ -1162,6 +1165,7 @@ out:
67209 }
67210
67211 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
67212+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
67213
67214 extern unsigned long do_brk(unsigned long, unsigned long);
67215
67216@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
67217 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
67218 struct vm_area_struct **pprev);
67219
67220+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
67221+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
67222+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
67223+
67224 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
67225 NULL if none. Assume start_addr < end_addr. */
67226 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
67227@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
67228 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
67229 }
67230
67231-pgprot_t vm_get_page_prot(unsigned long vm_flags);
67232 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
67233 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
67234 unsigned long pfn, unsigned long size, pgprot_t);
67235@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
67236 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
67237 extern int sysctl_memory_failure_early_kill;
67238 extern int sysctl_memory_failure_recovery;
67239-extern atomic_long_t mce_bad_pages;
67240+extern atomic_long_unchecked_t mce_bad_pages;
67241+
67242+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67243+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
67244+#else
67245+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
67246+#endif
67247
67248 #endif /* __KERNEL__ */
67249 #endif /* _LINUX_MM_H */
67250diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
67251index 9d12ed5..8023125 100644
67252--- a/include/linux/mm_types.h
67253+++ b/include/linux/mm_types.h
67254@@ -186,6 +186,8 @@ struct vm_area_struct {
67255 #ifdef CONFIG_NUMA
67256 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
67257 #endif
67258+
67259+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
67260 };
67261
67262 struct core_thread {
67263@@ -287,6 +289,24 @@ struct mm_struct {
67264 #ifdef CONFIG_MMU_NOTIFIER
67265 struct mmu_notifier_mm *mmu_notifier_mm;
67266 #endif
67267+
67268+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67269+ unsigned long pax_flags;
67270+#endif
67271+
67272+#ifdef CONFIG_PAX_DLRESOLVE
67273+ unsigned long call_dl_resolve;
67274+#endif
67275+
67276+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
67277+ unsigned long call_syscall;
67278+#endif
67279+
67280+#ifdef CONFIG_PAX_ASLR
67281+ unsigned long delta_mmap; /* randomized offset */
67282+ unsigned long delta_stack; /* randomized offset */
67283+#endif
67284+
67285 };
67286
67287 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
67288diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
67289index 4e02ee2..afb159e 100644
67290--- a/include/linux/mmu_notifier.h
67291+++ b/include/linux/mmu_notifier.h
67292@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
67293 */
67294 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
67295 ({ \
67296- pte_t __pte; \
67297+ pte_t ___pte; \
67298 struct vm_area_struct *___vma = __vma; \
67299 unsigned long ___address = __address; \
67300- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
67301+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
67302 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
67303- __pte; \
67304+ ___pte; \
67305 })
67306
67307 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
67308diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
67309index 6c31a2a..4b0e930 100644
67310--- a/include/linux/mmzone.h
67311+++ b/include/linux/mmzone.h
67312@@ -350,7 +350,7 @@ struct zone {
67313 unsigned long flags; /* zone flags, see below */
67314
67315 /* Zone statistics */
67316- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67317+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67318
67319 /*
67320 * prev_priority holds the scanning priority for this zone. It is
67321diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
67322index f58e9d8..3503935 100644
67323--- a/include/linux/mod_devicetable.h
67324+++ b/include/linux/mod_devicetable.h
67325@@ -12,7 +12,7 @@
67326 typedef unsigned long kernel_ulong_t;
67327 #endif
67328
67329-#define PCI_ANY_ID (~0)
67330+#define PCI_ANY_ID ((__u16)~0)
67331
67332 struct pci_device_id {
67333 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
67334@@ -131,7 +131,7 @@ struct usb_device_id {
67335 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
67336 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
67337
67338-#define HID_ANY_ID (~0)
67339+#define HID_ANY_ID (~0U)
67340
67341 struct hid_device_id {
67342 __u16 bus;
67343diff --git a/include/linux/module.h b/include/linux/module.h
67344index 482efc8..642032b 100644
67345--- a/include/linux/module.h
67346+++ b/include/linux/module.h
67347@@ -16,6 +16,7 @@
67348 #include <linux/kobject.h>
67349 #include <linux/moduleparam.h>
67350 #include <linux/tracepoint.h>
67351+#include <linux/fs.h>
67352
67353 #include <asm/local.h>
67354 #include <asm/module.h>
67355@@ -287,16 +288,16 @@ struct module
67356 int (*init)(void);
67357
67358 /* If this is non-NULL, vfree after init() returns */
67359- void *module_init;
67360+ void *module_init_rx, *module_init_rw;
67361
67362 /* Here is the actual code + data, vfree'd on unload. */
67363- void *module_core;
67364+ void *module_core_rx, *module_core_rw;
67365
67366 /* Here are the sizes of the init and core sections */
67367- unsigned int init_size, core_size;
67368+ unsigned int init_size_rw, core_size_rw;
67369
67370 /* The size of the executable code in each section. */
67371- unsigned int init_text_size, core_text_size;
67372+ unsigned int init_size_rx, core_size_rx;
67373
67374 /* Arch-specific module values */
67375 struct mod_arch_specific arch;
67376@@ -345,6 +346,10 @@ struct module
67377 #ifdef CONFIG_EVENT_TRACING
67378 struct ftrace_event_call *trace_events;
67379 unsigned int num_trace_events;
67380+ struct file_operations trace_id;
67381+ struct file_operations trace_enable;
67382+ struct file_operations trace_format;
67383+ struct file_operations trace_filter;
67384 #endif
67385 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
67386 unsigned long *ftrace_callsites;
67387@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
67388 bool is_module_address(unsigned long addr);
67389 bool is_module_text_address(unsigned long addr);
67390
67391+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
67392+{
67393+
67394+#ifdef CONFIG_PAX_KERNEXEC
67395+ if (ktla_ktva(addr) >= (unsigned long)start &&
67396+ ktla_ktva(addr) < (unsigned long)start + size)
67397+ return 1;
67398+#endif
67399+
67400+ return ((void *)addr >= start && (void *)addr < start + size);
67401+}
67402+
67403+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
67404+{
67405+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
67406+}
67407+
67408+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
67409+{
67410+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
67411+}
67412+
67413+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
67414+{
67415+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
67416+}
67417+
67418+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
67419+{
67420+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
67421+}
67422+
67423 static inline int within_module_core(unsigned long addr, struct module *mod)
67424 {
67425- return (unsigned long)mod->module_core <= addr &&
67426- addr < (unsigned long)mod->module_core + mod->core_size;
67427+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
67428 }
67429
67430 static inline int within_module_init(unsigned long addr, struct module *mod)
67431 {
67432- return (unsigned long)mod->module_init <= addr &&
67433- addr < (unsigned long)mod->module_init + mod->init_size;
67434+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
67435 }
67436
67437 /* Search for module by name: must hold module_mutex. */
67438diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
67439index c1f40c2..682ca53 100644
67440--- a/include/linux/moduleloader.h
67441+++ b/include/linux/moduleloader.h
67442@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
67443 sections. Returns NULL on failure. */
67444 void *module_alloc(unsigned long size);
67445
67446+#ifdef CONFIG_PAX_KERNEXEC
67447+void *module_alloc_exec(unsigned long size);
67448+#else
67449+#define module_alloc_exec(x) module_alloc(x)
67450+#endif
67451+
67452 /* Free memory returned from module_alloc. */
67453 void module_free(struct module *mod, void *module_region);
67454
67455+#ifdef CONFIG_PAX_KERNEXEC
67456+void module_free_exec(struct module *mod, void *module_region);
67457+#else
67458+#define module_free_exec(x, y) module_free((x), (y))
67459+#endif
67460+
67461 /* Apply the given relocation to the (simplified) ELF. Return -error
67462 or 0. */
67463 int apply_relocate(Elf_Shdr *sechdrs,
67464diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
67465index 82a9124..8a5f622 100644
67466--- a/include/linux/moduleparam.h
67467+++ b/include/linux/moduleparam.h
67468@@ -132,7 +132,7 @@ struct kparam_array
67469
67470 /* Actually copy string: maxlen param is usually sizeof(string). */
67471 #define module_param_string(name, string, len, perm) \
67472- static const struct kparam_string __param_string_##name \
67473+ static const struct kparam_string __param_string_##name __used \
67474 = { len, string }; \
67475 __module_param_call(MODULE_PARAM_PREFIX, name, \
67476 param_set_copystring, param_get_string, \
67477@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
67478
67479 /* Comma-separated array: *nump is set to number they actually specified. */
67480 #define module_param_array_named(name, array, type, nump, perm) \
67481- static const struct kparam_array __param_arr_##name \
67482+ static const struct kparam_array __param_arr_##name __used \
67483 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
67484 sizeof(array[0]), array }; \
67485 __module_param_call(MODULE_PARAM_PREFIX, name, \
67486diff --git a/include/linux/mutex.h b/include/linux/mutex.h
67487index 878cab4..c92cb3e 100644
67488--- a/include/linux/mutex.h
67489+++ b/include/linux/mutex.h
67490@@ -51,7 +51,7 @@ struct mutex {
67491 spinlock_t wait_lock;
67492 struct list_head wait_list;
67493 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
67494- struct thread_info *owner;
67495+ struct task_struct *owner;
67496 #endif
67497 #ifdef CONFIG_DEBUG_MUTEXES
67498 const char *name;
67499diff --git a/include/linux/namei.h b/include/linux/namei.h
67500index ec0f607..d19e675 100644
67501--- a/include/linux/namei.h
67502+++ b/include/linux/namei.h
67503@@ -22,7 +22,7 @@ struct nameidata {
67504 unsigned int flags;
67505 int last_type;
67506 unsigned depth;
67507- char *saved_names[MAX_NESTED_LINKS + 1];
67508+ const char *saved_names[MAX_NESTED_LINKS + 1];
67509
67510 /* Intent data */
67511 union {
67512@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
67513 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
67514 extern void unlock_rename(struct dentry *, struct dentry *);
67515
67516-static inline void nd_set_link(struct nameidata *nd, char *path)
67517+static inline void nd_set_link(struct nameidata *nd, const char *path)
67518 {
67519 nd->saved_names[nd->depth] = path;
67520 }
67521
67522-static inline char *nd_get_link(struct nameidata *nd)
67523+static inline const char *nd_get_link(const struct nameidata *nd)
67524 {
67525 return nd->saved_names[nd->depth];
67526 }
67527diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
67528index 9d7e8f7..04428c5 100644
67529--- a/include/linux/netdevice.h
67530+++ b/include/linux/netdevice.h
67531@@ -637,6 +637,7 @@ struct net_device_ops {
67532 u16 xid);
67533 #endif
67534 };
67535+typedef struct net_device_ops __no_const net_device_ops_no_const;
67536
67537 /*
67538 * The DEVICE structure.
67539diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
67540new file mode 100644
67541index 0000000..33f4af8
67542--- /dev/null
67543+++ b/include/linux/netfilter/xt_gradm.h
67544@@ -0,0 +1,9 @@
67545+#ifndef _LINUX_NETFILTER_XT_GRADM_H
67546+#define _LINUX_NETFILTER_XT_GRADM_H 1
67547+
67548+struct xt_gradm_mtinfo {
67549+ __u16 flags;
67550+ __u16 invflags;
67551+};
67552+
67553+#endif
67554diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
67555index b359c4a..c08b334 100644
67556--- a/include/linux/nodemask.h
67557+++ b/include/linux/nodemask.h
67558@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
67559
67560 #define any_online_node(mask) \
67561 ({ \
67562- int node; \
67563- for_each_node_mask(node, (mask)) \
67564- if (node_online(node)) \
67565+ int __node; \
67566+ for_each_node_mask(__node, (mask)) \
67567+ if (node_online(__node)) \
67568 break; \
67569- node; \
67570+ __node; \
67571 })
67572
67573 #define num_online_nodes() num_node_state(N_ONLINE)
67574diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
67575index 5171639..7cf4235 100644
67576--- a/include/linux/oprofile.h
67577+++ b/include/linux/oprofile.h
67578@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
67579 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
67580 char const * name, ulong * val);
67581
67582-/** Create a file for read-only access to an atomic_t. */
67583+/** Create a file for read-only access to an atomic_unchecked_t. */
67584 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
67585- char const * name, atomic_t * val);
67586+ char const * name, atomic_unchecked_t * val);
67587
67588 /** create a directory */
67589 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
67590diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
67591index 3c62ed4..8924c7c 100644
67592--- a/include/linux/pagemap.h
67593+++ b/include/linux/pagemap.h
67594@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
67595 if (((unsigned long)uaddr & PAGE_MASK) !=
67596 ((unsigned long)end & PAGE_MASK))
67597 ret = __get_user(c, end);
67598+ (void)c;
67599 }
67600+ (void)c;
67601 return ret;
67602 }
67603
67604diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
67605index 81c9689..a567a55 100644
67606--- a/include/linux/perf_event.h
67607+++ b/include/linux/perf_event.h
67608@@ -476,7 +476,7 @@ struct hw_perf_event {
67609 struct hrtimer hrtimer;
67610 };
67611 };
67612- atomic64_t prev_count;
67613+ atomic64_unchecked_t prev_count;
67614 u64 sample_period;
67615 u64 last_period;
67616 atomic64_t period_left;
67617@@ -557,7 +557,7 @@ struct perf_event {
67618 const struct pmu *pmu;
67619
67620 enum perf_event_active_state state;
67621- atomic64_t count;
67622+ atomic64_unchecked_t count;
67623
67624 /*
67625 * These are the total time in nanoseconds that the event
67626@@ -595,8 +595,8 @@ struct perf_event {
67627 * These accumulate total time (in nanoseconds) that children
67628 * events have been enabled and running, respectively.
67629 */
67630- atomic64_t child_total_time_enabled;
67631- atomic64_t child_total_time_running;
67632+ atomic64_unchecked_t child_total_time_enabled;
67633+ atomic64_unchecked_t child_total_time_running;
67634
67635 /*
67636 * Protect attach/detach and child_list:
67637diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
67638index b43a9e0..b77d869 100644
67639--- a/include/linux/pipe_fs_i.h
67640+++ b/include/linux/pipe_fs_i.h
67641@@ -46,9 +46,9 @@ struct pipe_inode_info {
67642 wait_queue_head_t wait;
67643 unsigned int nrbufs, curbuf;
67644 struct page *tmp_page;
67645- unsigned int readers;
67646- unsigned int writers;
67647- unsigned int waiting_writers;
67648+ atomic_t readers;
67649+ atomic_t writers;
67650+ atomic_t waiting_writers;
67651 unsigned int r_counter;
67652 unsigned int w_counter;
67653 struct fasync_struct *fasync_readers;
67654diff --git a/include/linux/poison.h b/include/linux/poison.h
67655index 34066ff..e95d744 100644
67656--- a/include/linux/poison.h
67657+++ b/include/linux/poison.h
67658@@ -19,8 +19,8 @@
67659 * under normal circumstances, used to verify that nobody uses
67660 * non-initialized list entries.
67661 */
67662-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
67663-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
67664+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
67665+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
67666
67667 /********** include/linux/timer.h **********/
67668 /*
67669diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
67670index 4f71bf4..77ffa64 100644
67671--- a/include/linux/posix-timers.h
67672+++ b/include/linux/posix-timers.h
67673@@ -67,7 +67,7 @@ struct k_itimer {
67674 };
67675
67676 struct k_clock {
67677- int res; /* in nanoseconds */
67678+ const int res; /* in nanoseconds */
67679 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
67680 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
67681 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
67682diff --git a/include/linux/preempt.h b/include/linux/preempt.h
67683index 72b1a10..13303a9 100644
67684--- a/include/linux/preempt.h
67685+++ b/include/linux/preempt.h
67686@@ -110,7 +110,7 @@ struct preempt_ops {
67687 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
67688 void (*sched_out)(struct preempt_notifier *notifier,
67689 struct task_struct *next);
67690-};
67691+} __no_const;
67692
67693 /**
67694 * preempt_notifier - key for installing preemption notifiers
67695diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
67696index 379eaed..1bf73e3 100644
67697--- a/include/linux/proc_fs.h
67698+++ b/include/linux/proc_fs.h
67699@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
67700 return proc_create_data(name, mode, parent, proc_fops, NULL);
67701 }
67702
67703+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
67704+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
67705+{
67706+#ifdef CONFIG_GRKERNSEC_PROC_USER
67707+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
67708+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67709+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
67710+#else
67711+ return proc_create_data(name, mode, parent, proc_fops, NULL);
67712+#endif
67713+}
67714+
67715+
67716 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
67717 mode_t mode, struct proc_dir_entry *base,
67718 read_proc_t *read_proc, void * data)
67719@@ -256,7 +269,7 @@ union proc_op {
67720 int (*proc_show)(struct seq_file *m,
67721 struct pid_namespace *ns, struct pid *pid,
67722 struct task_struct *task);
67723-};
67724+} __no_const;
67725
67726 struct ctl_table_header;
67727 struct ctl_table;
67728diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
67729index 7456d7d..6c1cfc9 100644
67730--- a/include/linux/ptrace.h
67731+++ b/include/linux/ptrace.h
67732@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
67733 extern void exit_ptrace(struct task_struct *tracer);
67734 #define PTRACE_MODE_READ 1
67735 #define PTRACE_MODE_ATTACH 2
67736-/* Returns 0 on success, -errno on denial. */
67737-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
67738 /* Returns true on success, false on denial. */
67739 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
67740+/* Returns true on success, false on denial. */
67741+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
67742
67743 static inline int ptrace_reparented(struct task_struct *child)
67744 {
67745diff --git a/include/linux/random.h b/include/linux/random.h
67746index 2948046..3262567 100644
67747--- a/include/linux/random.h
67748+++ b/include/linux/random.h
67749@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
67750 u32 random32(void);
67751 void srandom32(u32 seed);
67752
67753+static inline unsigned long pax_get_random_long(void)
67754+{
67755+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
67756+}
67757+
67758 #endif /* __KERNEL___ */
67759
67760 #endif /* _LINUX_RANDOM_H */
67761diff --git a/include/linux/reboot.h b/include/linux/reboot.h
67762index 988e55f..17cb4ef 100644
67763--- a/include/linux/reboot.h
67764+++ b/include/linux/reboot.h
67765@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
67766 * Architecture-specific implementations of sys_reboot commands.
67767 */
67768
67769-extern void machine_restart(char *cmd);
67770-extern void machine_halt(void);
67771-extern void machine_power_off(void);
67772+extern void machine_restart(char *cmd) __noreturn;
67773+extern void machine_halt(void) __noreturn;
67774+extern void machine_power_off(void) __noreturn;
67775
67776 extern void machine_shutdown(void);
67777 struct pt_regs;
67778@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
67779 */
67780
67781 extern void kernel_restart_prepare(char *cmd);
67782-extern void kernel_restart(char *cmd);
67783-extern void kernel_halt(void);
67784-extern void kernel_power_off(void);
67785+extern void kernel_restart(char *cmd) __noreturn;
67786+extern void kernel_halt(void) __noreturn;
67787+extern void kernel_power_off(void) __noreturn;
67788
67789 void ctrl_alt_del(void);
67790
67791@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
67792 * Emergency restart, callable from an interrupt handler.
67793 */
67794
67795-extern void emergency_restart(void);
67796+extern void emergency_restart(void) __noreturn;
67797 #include <asm/emergency-restart.h>
67798
67799 #endif
67800diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
67801index dd31e7b..5b03c5c 100644
67802--- a/include/linux/reiserfs_fs.h
67803+++ b/include/linux/reiserfs_fs.h
67804@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67805 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
67806
67807 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67808-#define get_generation(s) atomic_read (&fs_generation(s))
67809+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67810 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67811 #define __fs_changed(gen,s) (gen != get_generation (s))
67812 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
67813@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
67814 */
67815
67816 struct item_operations {
67817- int (*bytes_number) (struct item_head * ih, int block_size);
67818- void (*decrement_key) (struct cpu_key *);
67819- int (*is_left_mergeable) (struct reiserfs_key * ih,
67820+ int (* const bytes_number) (struct item_head * ih, int block_size);
67821+ void (* const decrement_key) (struct cpu_key *);
67822+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
67823 unsigned long bsize);
67824- void (*print_item) (struct item_head *, char *item);
67825- void (*check_item) (struct item_head *, char *item);
67826+ void (* const print_item) (struct item_head *, char *item);
67827+ void (* const check_item) (struct item_head *, char *item);
67828
67829- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
67830+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
67831 int is_affected, int insert_size);
67832- int (*check_left) (struct virtual_item * vi, int free,
67833+ int (* const check_left) (struct virtual_item * vi, int free,
67834 int start_skip, int end_skip);
67835- int (*check_right) (struct virtual_item * vi, int free);
67836- int (*part_size) (struct virtual_item * vi, int from, int to);
67837- int (*unit_num) (struct virtual_item * vi);
67838- void (*print_vi) (struct virtual_item * vi);
67839+ int (* const check_right) (struct virtual_item * vi, int free);
67840+ int (* const part_size) (struct virtual_item * vi, int from, int to);
67841+ int (* const unit_num) (struct virtual_item * vi);
67842+ void (* const print_vi) (struct virtual_item * vi);
67843 };
67844
67845-extern struct item_operations *item_ops[TYPE_ANY + 1];
67846+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
67847
67848 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
67849 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
67850diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
67851index dab68bb..0688727 100644
67852--- a/include/linux/reiserfs_fs_sb.h
67853+++ b/include/linux/reiserfs_fs_sb.h
67854@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
67855 /* Comment? -Hans */
67856 wait_queue_head_t s_wait;
67857 /* To be obsoleted soon by per buffer seals.. -Hans */
67858- atomic_t s_generation_counter; // increased by one every time the
67859+ atomic_unchecked_t s_generation_counter; // increased by one every time the
67860 // tree gets re-balanced
67861 unsigned long s_properties; /* File system properties. Currently holds
67862 on-disk FS format */
67863diff --git a/include/linux/relay.h b/include/linux/relay.h
67864index 14a86bc..17d0700 100644
67865--- a/include/linux/relay.h
67866+++ b/include/linux/relay.h
67867@@ -159,7 +159,7 @@ struct rchan_callbacks
67868 * The callback should return 0 if successful, negative if not.
67869 */
67870 int (*remove_buf_file)(struct dentry *dentry);
67871-};
67872+} __no_const;
67873
67874 /*
67875 * CONFIG_RELAY kernel API, kernel/relay.c
67876diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
67877index 3392c59..a746428 100644
67878--- a/include/linux/rfkill.h
67879+++ b/include/linux/rfkill.h
67880@@ -144,6 +144,7 @@ struct rfkill_ops {
67881 void (*query)(struct rfkill *rfkill, void *data);
67882 int (*set_block)(void *data, bool blocked);
67883 };
67884+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
67885
67886 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
67887 /**
67888diff --git a/include/linux/sched.h b/include/linux/sched.h
67889index 71849bf..40217dc 100644
67890--- a/include/linux/sched.h
67891+++ b/include/linux/sched.h
67892@@ -101,6 +101,7 @@ struct bio;
67893 struct fs_struct;
67894 struct bts_context;
67895 struct perf_event_context;
67896+struct linux_binprm;
67897
67898 /*
67899 * List of flags we want to share for kernel threads,
67900@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
67901 extern signed long schedule_timeout_uninterruptible(signed long timeout);
67902 asmlinkage void __schedule(void);
67903 asmlinkage void schedule(void);
67904-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
67905+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
67906
67907 struct nsproxy;
67908 struct user_namespace;
67909@@ -371,9 +372,12 @@ struct user_namespace;
67910 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
67911
67912 extern int sysctl_max_map_count;
67913+extern unsigned long sysctl_heap_stack_gap;
67914
67915 #include <linux/aio.h>
67916
67917+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
67918+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
67919 extern unsigned long
67920 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
67921 unsigned long, unsigned long);
67922@@ -666,6 +670,16 @@ struct signal_struct {
67923 struct tty_audit_buf *tty_audit_buf;
67924 #endif
67925
67926+#ifdef CONFIG_GRKERNSEC
67927+ u32 curr_ip;
67928+ u32 saved_ip;
67929+ u32 gr_saddr;
67930+ u32 gr_daddr;
67931+ u16 gr_sport;
67932+ u16 gr_dport;
67933+ u8 used_accept:1;
67934+#endif
67935+
67936 int oom_adj; /* OOM kill score adjustment (bit shift) */
67937 };
67938
67939@@ -723,6 +737,11 @@ struct user_struct {
67940 struct key *session_keyring; /* UID's default session keyring */
67941 #endif
67942
67943+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
67944+ unsigned int banned;
67945+ unsigned long ban_expires;
67946+#endif
67947+
67948 /* Hash table maintenance information */
67949 struct hlist_node uidhash_node;
67950 uid_t uid;
67951@@ -1328,8 +1347,8 @@ struct task_struct {
67952 struct list_head thread_group;
67953
67954 struct completion *vfork_done; /* for vfork() */
67955- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
67956- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
67957+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
67958+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
67959
67960 cputime_t utime, stime, utimescaled, stimescaled;
67961 cputime_t gtime;
67962@@ -1343,16 +1362,6 @@ struct task_struct {
67963 struct task_cputime cputime_expires;
67964 struct list_head cpu_timers[3];
67965
67966-/* process credentials */
67967- const struct cred *real_cred; /* objective and real subjective task
67968- * credentials (COW) */
67969- const struct cred *cred; /* effective (overridable) subjective task
67970- * credentials (COW) */
67971- struct mutex cred_guard_mutex; /* guard against foreign influences on
67972- * credential calculations
67973- * (notably. ptrace) */
67974- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
67975-
67976 char comm[TASK_COMM_LEN]; /* executable name excluding path
67977 - access with [gs]et_task_comm (which lock
67978 it with task_lock())
67979@@ -1369,6 +1378,10 @@ struct task_struct {
67980 #endif
67981 /* CPU-specific state of this task */
67982 struct thread_struct thread;
67983+/* thread_info moved to task_struct */
67984+#ifdef CONFIG_X86
67985+ struct thread_info tinfo;
67986+#endif
67987 /* filesystem information */
67988 struct fs_struct *fs;
67989 /* open file information */
67990@@ -1436,6 +1449,15 @@ struct task_struct {
67991 int hardirq_context;
67992 int softirq_context;
67993 #endif
67994+
67995+/* process credentials */
67996+ const struct cred *real_cred; /* objective and real subjective task
67997+ * credentials (COW) */
67998+ struct mutex cred_guard_mutex; /* guard against foreign influences on
67999+ * credential calculations
68000+ * (notably. ptrace) */
68001+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68002+
68003 #ifdef CONFIG_LOCKDEP
68004 # define MAX_LOCK_DEPTH 48UL
68005 u64 curr_chain_key;
68006@@ -1456,6 +1478,9 @@ struct task_struct {
68007
68008 struct backing_dev_info *backing_dev_info;
68009
68010+ const struct cred *cred; /* effective (overridable) subjective task
68011+ * credentials (COW) */
68012+
68013 struct io_context *io_context;
68014
68015 unsigned long ptrace_message;
68016@@ -1519,6 +1544,21 @@ struct task_struct {
68017 unsigned long default_timer_slack_ns;
68018
68019 struct list_head *scm_work_list;
68020+
68021+#ifdef CONFIG_GRKERNSEC
68022+ /* grsecurity */
68023+ struct dentry *gr_chroot_dentry;
68024+ struct acl_subject_label *acl;
68025+ struct acl_role_label *role;
68026+ struct file *exec_file;
68027+ u16 acl_role_id;
68028+ /* is this the task that authenticated to the special role */
68029+ u8 acl_sp_role;
68030+ u8 is_writable;
68031+ u8 brute;
68032+ u8 gr_is_chrooted;
68033+#endif
68034+
68035 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68036 /* Index of current stored adress in ret_stack */
68037 int curr_ret_stack;
68038@@ -1542,6 +1582,57 @@ struct task_struct {
68039 #endif /* CONFIG_TRACING */
68040 };
68041
68042+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68043+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68044+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68045+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68046+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68047+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68048+
68049+#ifdef CONFIG_PAX_SOFTMODE
68050+extern int pax_softmode;
68051+#endif
68052+
68053+extern int pax_check_flags(unsigned long *);
68054+
68055+/* if tsk != current then task_lock must be held on it */
68056+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68057+static inline unsigned long pax_get_flags(struct task_struct *tsk)
68058+{
68059+ if (likely(tsk->mm))
68060+ return tsk->mm->pax_flags;
68061+ else
68062+ return 0UL;
68063+}
68064+
68065+/* if tsk != current then task_lock must be held on it */
68066+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68067+{
68068+ if (likely(tsk->mm)) {
68069+ tsk->mm->pax_flags = flags;
68070+ return 0;
68071+ }
68072+ return -EINVAL;
68073+}
68074+#endif
68075+
68076+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68077+extern void pax_set_initial_flags(struct linux_binprm *bprm);
68078+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
68079+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68080+#endif
68081+
68082+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
68083+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
68084+extern void pax_report_refcount_overflow(struct pt_regs *regs);
68085+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
68086+
68087+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
68088+extern void pax_track_stack(void);
68089+#else
68090+static inline void pax_track_stack(void) {}
68091+#endif
68092+
68093 /* Future-safe accessor for struct task_struct's cpus_allowed. */
68094 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
68095
68096@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
68097 #define PF_DUMPCORE 0x00000200 /* dumped core */
68098 #define PF_SIGNALED 0x00000400 /* killed by a signal */
68099 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
68100-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
68101+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
68102 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
68103 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
68104 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
68105@@ -1978,7 +2069,9 @@ void yield(void);
68106 extern struct exec_domain default_exec_domain;
68107
68108 union thread_union {
68109+#ifndef CONFIG_X86
68110 struct thread_info thread_info;
68111+#endif
68112 unsigned long stack[THREAD_SIZE/sizeof(long)];
68113 };
68114
68115@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
68116 */
68117
68118 extern struct task_struct *find_task_by_vpid(pid_t nr);
68119+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
68120 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
68121 struct pid_namespace *ns);
68122
68123@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
68124 extern void exit_itimers(struct signal_struct *);
68125 extern void flush_itimer_signals(void);
68126
68127-extern NORET_TYPE void do_group_exit(int);
68128+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
68129
68130 extern void daemonize(const char *, ...);
68131 extern int allow_signal(int);
68132@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
68133
68134 #endif
68135
68136-static inline int object_is_on_stack(void *obj)
68137+static inline int object_starts_on_stack(void *obj)
68138 {
68139- void *stack = task_stack_page(current);
68140+ const void *stack = task_stack_page(current);
68141
68142 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
68143 }
68144
68145+#ifdef CONFIG_PAX_USERCOPY
68146+extern int object_is_on_stack(const void *obj, unsigned long len);
68147+#endif
68148+
68149 extern void thread_info_cache_init(void);
68150
68151 #ifdef CONFIG_DEBUG_STACK_USAGE
68152diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
68153index 1ee2c05..81b7ec4 100644
68154--- a/include/linux/screen_info.h
68155+++ b/include/linux/screen_info.h
68156@@ -42,7 +42,8 @@ struct screen_info {
68157 __u16 pages; /* 0x32 */
68158 __u16 vesa_attributes; /* 0x34 */
68159 __u32 capabilities; /* 0x36 */
68160- __u8 _reserved[6]; /* 0x3a */
68161+ __u16 vesapm_size; /* 0x3a */
68162+ __u8 _reserved[4]; /* 0x3c */
68163 } __attribute__((packed));
68164
68165 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68166diff --git a/include/linux/security.h b/include/linux/security.h
68167index d40d23f..253bd14 100644
68168--- a/include/linux/security.h
68169+++ b/include/linux/security.h
68170@@ -34,6 +34,7 @@
68171 #include <linux/key.h>
68172 #include <linux/xfrm.h>
68173 #include <linux/gfp.h>
68174+#include <linux/grsecurity.h>
68175 #include <net/flow.h>
68176
68177 /* Maximum number of letters for an LSM name string */
68178diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
68179index 8366d8f..2307490 100644
68180--- a/include/linux/seq_file.h
68181+++ b/include/linux/seq_file.h
68182@@ -32,6 +32,7 @@ struct seq_operations {
68183 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
68184 int (*show) (struct seq_file *m, void *v);
68185 };
68186+typedef struct seq_operations __no_const seq_operations_no_const;
68187
68188 #define SEQ_SKIP 1
68189
68190diff --git a/include/linux/shm.h b/include/linux/shm.h
68191index eca6235..c7417ed 100644
68192--- a/include/linux/shm.h
68193+++ b/include/linux/shm.h
68194@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
68195 pid_t shm_cprid;
68196 pid_t shm_lprid;
68197 struct user_struct *mlock_user;
68198+#ifdef CONFIG_GRKERNSEC
68199+ time_t shm_createtime;
68200+ pid_t shm_lapid;
68201+#endif
68202 };
68203
68204 /* shm_mode upper byte flags */
68205diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
68206index bcdd660..6e12e11 100644
68207--- a/include/linux/skbuff.h
68208+++ b/include/linux/skbuff.h
68209@@ -14,6 +14,7 @@
68210 #ifndef _LINUX_SKBUFF_H
68211 #define _LINUX_SKBUFF_H
68212
68213+#include <linux/const.h>
68214 #include <linux/kernel.h>
68215 #include <linux/kmemcheck.h>
68216 #include <linux/compiler.h>
68217@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
68218 */
68219 static inline int skb_queue_empty(const struct sk_buff_head *list)
68220 {
68221- return list->next == (struct sk_buff *)list;
68222+ return list->next == (const struct sk_buff *)list;
68223 }
68224
68225 /**
68226@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
68227 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68228 const struct sk_buff *skb)
68229 {
68230- return (skb->next == (struct sk_buff *) list);
68231+ return (skb->next == (const struct sk_buff *) list);
68232 }
68233
68234 /**
68235@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68236 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
68237 const struct sk_buff *skb)
68238 {
68239- return (skb->prev == (struct sk_buff *) list);
68240+ return (skb->prev == (const struct sk_buff *) list);
68241 }
68242
68243 /**
68244@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
68245 * headroom, you should not reduce this.
68246 */
68247 #ifndef NET_SKB_PAD
68248-#define NET_SKB_PAD 32
68249+#define NET_SKB_PAD (_AC(32,UL))
68250 #endif
68251
68252 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
68253diff --git a/include/linux/slab.h b/include/linux/slab.h
68254index 2da8372..a3be824 100644
68255--- a/include/linux/slab.h
68256+++ b/include/linux/slab.h
68257@@ -11,12 +11,20 @@
68258
68259 #include <linux/gfp.h>
68260 #include <linux/types.h>
68261+#include <linux/err.h>
68262
68263 /*
68264 * Flags to pass to kmem_cache_create().
68265 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
68266 */
68267 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
68268+
68269+#ifdef CONFIG_PAX_USERCOPY
68270+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
68271+#else
68272+#define SLAB_USERCOPY 0x00000000UL
68273+#endif
68274+
68275 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
68276 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
68277 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
68278@@ -82,10 +90,13 @@
68279 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
68280 * Both make kfree a no-op.
68281 */
68282-#define ZERO_SIZE_PTR ((void *)16)
68283+#define ZERO_SIZE_PTR \
68284+({ \
68285+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
68286+ (void *)(-MAX_ERRNO-1L); \
68287+})
68288
68289-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
68290- (unsigned long)ZERO_SIZE_PTR)
68291+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
68292
68293 /*
68294 * struct kmem_cache related prototypes
68295@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
68296 void kfree(const void *);
68297 void kzfree(const void *);
68298 size_t ksize(const void *);
68299+void check_object_size(const void *ptr, unsigned long n, bool to);
68300
68301 /*
68302 * Allocator specific definitions. These are mainly used to establish optimized
68303@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
68304
68305 void __init kmem_cache_init_late(void);
68306
68307+#define kmalloc(x, y) \
68308+({ \
68309+ void *___retval; \
68310+ intoverflow_t ___x = (intoverflow_t)x; \
68311+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
68312+ ___retval = NULL; \
68313+ else \
68314+ ___retval = kmalloc((size_t)___x, (y)); \
68315+ ___retval; \
68316+})
68317+
68318+#define kmalloc_node(x, y, z) \
68319+({ \
68320+ void *___retval; \
68321+ intoverflow_t ___x = (intoverflow_t)x; \
68322+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
68323+ ___retval = NULL; \
68324+ else \
68325+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
68326+ ___retval; \
68327+})
68328+
68329+#define kzalloc(x, y) \
68330+({ \
68331+ void *___retval; \
68332+ intoverflow_t ___x = (intoverflow_t)x; \
68333+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
68334+ ___retval = NULL; \
68335+ else \
68336+ ___retval = kzalloc((size_t)___x, (y)); \
68337+ ___retval; \
68338+})
68339+
68340 #endif /* _LINUX_SLAB_H */
68341diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
68342index 850d057..d9dfe3c 100644
68343--- a/include/linux/slab_def.h
68344+++ b/include/linux/slab_def.h
68345@@ -69,10 +69,10 @@ struct kmem_cache {
68346 unsigned long node_allocs;
68347 unsigned long node_frees;
68348 unsigned long node_overflow;
68349- atomic_t allochit;
68350- atomic_t allocmiss;
68351- atomic_t freehit;
68352- atomic_t freemiss;
68353+ atomic_unchecked_t allochit;
68354+ atomic_unchecked_t allocmiss;
68355+ atomic_unchecked_t freehit;
68356+ atomic_unchecked_t freemiss;
68357
68358 /*
68359 * If debugging is enabled, then the allocator can add additional
68360diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
68361index 5ad70a6..57f9f65 100644
68362--- a/include/linux/slub_def.h
68363+++ b/include/linux/slub_def.h
68364@@ -86,7 +86,7 @@ struct kmem_cache {
68365 struct kmem_cache_order_objects max;
68366 struct kmem_cache_order_objects min;
68367 gfp_t allocflags; /* gfp flags to use on each alloc */
68368- int refcount; /* Refcount for slab cache destroy */
68369+ atomic_t refcount; /* Refcount for slab cache destroy */
68370 void (*ctor)(void *);
68371 int inuse; /* Offset to metadata */
68372 int align; /* Alignment */
68373@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
68374 #endif
68375
68376 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
68377-void *__kmalloc(size_t size, gfp_t flags);
68378+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
68379
68380 #ifdef CONFIG_KMEMTRACE
68381 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
68382diff --git a/include/linux/sonet.h b/include/linux/sonet.h
68383index 67ad11f..0bbd8af 100644
68384--- a/include/linux/sonet.h
68385+++ b/include/linux/sonet.h
68386@@ -61,7 +61,7 @@ struct sonet_stats {
68387 #include <asm/atomic.h>
68388
68389 struct k_sonet_stats {
68390-#define __HANDLE_ITEM(i) atomic_t i
68391+#define __HANDLE_ITEM(i) atomic_unchecked_t i
68392 __SONET_ITEMS
68393 #undef __HANDLE_ITEM
68394 };
68395diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
68396index 6f52b4d..5500323 100644
68397--- a/include/linux/sunrpc/cache.h
68398+++ b/include/linux/sunrpc/cache.h
68399@@ -125,7 +125,7 @@ struct cache_detail {
68400 */
68401 struct cache_req {
68402 struct cache_deferred_req *(*defer)(struct cache_req *req);
68403-};
68404+} __no_const;
68405 /* this must be embedded in a deferred_request that is being
68406 * delayed awaiting cache-fill
68407 */
68408diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
68409index 8ed9642..101ceab 100644
68410--- a/include/linux/sunrpc/clnt.h
68411+++ b/include/linux/sunrpc/clnt.h
68412@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
68413 {
68414 switch (sap->sa_family) {
68415 case AF_INET:
68416- return ntohs(((struct sockaddr_in *)sap)->sin_port);
68417+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
68418 case AF_INET6:
68419- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
68420+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
68421 }
68422 return 0;
68423 }
68424@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
68425 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
68426 const struct sockaddr *src)
68427 {
68428- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
68429+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
68430 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
68431
68432 dsin->sin_family = ssin->sin_family;
68433@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
68434 if (sa->sa_family != AF_INET6)
68435 return 0;
68436
68437- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
68438+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
68439 }
68440
68441 #endif /* __KERNEL__ */
68442diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
68443index c14fe86..393245e 100644
68444--- a/include/linux/sunrpc/svc_rdma.h
68445+++ b/include/linux/sunrpc/svc_rdma.h
68446@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
68447 extern unsigned int svcrdma_max_requests;
68448 extern unsigned int svcrdma_max_req_size;
68449
68450-extern atomic_t rdma_stat_recv;
68451-extern atomic_t rdma_stat_read;
68452-extern atomic_t rdma_stat_write;
68453-extern atomic_t rdma_stat_sq_starve;
68454-extern atomic_t rdma_stat_rq_starve;
68455-extern atomic_t rdma_stat_rq_poll;
68456-extern atomic_t rdma_stat_rq_prod;
68457-extern atomic_t rdma_stat_sq_poll;
68458-extern atomic_t rdma_stat_sq_prod;
68459+extern atomic_unchecked_t rdma_stat_recv;
68460+extern atomic_unchecked_t rdma_stat_read;
68461+extern atomic_unchecked_t rdma_stat_write;
68462+extern atomic_unchecked_t rdma_stat_sq_starve;
68463+extern atomic_unchecked_t rdma_stat_rq_starve;
68464+extern atomic_unchecked_t rdma_stat_rq_poll;
68465+extern atomic_unchecked_t rdma_stat_rq_prod;
68466+extern atomic_unchecked_t rdma_stat_sq_poll;
68467+extern atomic_unchecked_t rdma_stat_sq_prod;
68468
68469 #define RPCRDMA_VERSION 1
68470
68471diff --git a/include/linux/suspend.h b/include/linux/suspend.h
68472index 5e781d8..1e62818 100644
68473--- a/include/linux/suspend.h
68474+++ b/include/linux/suspend.h
68475@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
68476 * which require special recovery actions in that situation.
68477 */
68478 struct platform_suspend_ops {
68479- int (*valid)(suspend_state_t state);
68480- int (*begin)(suspend_state_t state);
68481- int (*prepare)(void);
68482- int (*prepare_late)(void);
68483- int (*enter)(suspend_state_t state);
68484- void (*wake)(void);
68485- void (*finish)(void);
68486- void (*end)(void);
68487- void (*recover)(void);
68488+ int (* const valid)(suspend_state_t state);
68489+ int (* const begin)(suspend_state_t state);
68490+ int (* const prepare)(void);
68491+ int (* const prepare_late)(void);
68492+ int (* const enter)(suspend_state_t state);
68493+ void (* const wake)(void);
68494+ void (* const finish)(void);
68495+ void (* const end)(void);
68496+ void (* const recover)(void);
68497 };
68498
68499 #ifdef CONFIG_SUSPEND
68500@@ -120,7 +120,7 @@ struct platform_suspend_ops {
68501 * suspend_set_ops - set platform dependent suspend operations
68502 * @ops: The new suspend operations to set.
68503 */
68504-extern void suspend_set_ops(struct platform_suspend_ops *ops);
68505+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
68506 extern int suspend_valid_only_mem(suspend_state_t state);
68507
68508 /**
68509@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
68510 #else /* !CONFIG_SUSPEND */
68511 #define suspend_valid_only_mem NULL
68512
68513-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
68514+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
68515 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
68516 #endif /* !CONFIG_SUSPEND */
68517
68518@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
68519 * platforms which require special recovery actions in that situation.
68520 */
68521 struct platform_hibernation_ops {
68522- int (*begin)(void);
68523- void (*end)(void);
68524- int (*pre_snapshot)(void);
68525- void (*finish)(void);
68526- int (*prepare)(void);
68527- int (*enter)(void);
68528- void (*leave)(void);
68529- int (*pre_restore)(void);
68530- void (*restore_cleanup)(void);
68531- void (*recover)(void);
68532+ int (* const begin)(void);
68533+ void (* const end)(void);
68534+ int (* const pre_snapshot)(void);
68535+ void (* const finish)(void);
68536+ int (* const prepare)(void);
68537+ int (* const enter)(void);
68538+ void (* const leave)(void);
68539+ int (* const pre_restore)(void);
68540+ void (* const restore_cleanup)(void);
68541+ void (* const recover)(void);
68542 };
68543
68544 #ifdef CONFIG_HIBERNATION
68545@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
68546 extern void swsusp_unset_page_free(struct page *);
68547 extern unsigned long get_safe_page(gfp_t gfp_mask);
68548
68549-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
68550+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
68551 extern int hibernate(void);
68552 extern bool system_entering_hibernation(void);
68553 #else /* CONFIG_HIBERNATION */
68554@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
68555 static inline void swsusp_set_page_free(struct page *p) {}
68556 static inline void swsusp_unset_page_free(struct page *p) {}
68557
68558-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
68559+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
68560 static inline int hibernate(void) { return -ENOSYS; }
68561 static inline bool system_entering_hibernation(void) { return false; }
68562 #endif /* CONFIG_HIBERNATION */
68563diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
68564index 0eb6942..a805cb6 100644
68565--- a/include/linux/sysctl.h
68566+++ b/include/linux/sysctl.h
68567@@ -164,7 +164,11 @@ enum
68568 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
68569 };
68570
68571-
68572+#ifdef CONFIG_PAX_SOFTMODE
68573+enum {
68574+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
68575+};
68576+#endif
68577
68578 /* CTL_VM names: */
68579 enum
68580@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
68581
68582 extern int proc_dostring(struct ctl_table *, int,
68583 void __user *, size_t *, loff_t *);
68584+extern int proc_dostring_modpriv(struct ctl_table *, int,
68585+ void __user *, size_t *, loff_t *);
68586 extern int proc_dointvec(struct ctl_table *, int,
68587 void __user *, size_t *, loff_t *);
68588 extern int proc_dointvec_minmax(struct ctl_table *, int,
68589@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
68590
68591 extern ctl_handler sysctl_data;
68592 extern ctl_handler sysctl_string;
68593+extern ctl_handler sysctl_string_modpriv;
68594 extern ctl_handler sysctl_intvec;
68595 extern ctl_handler sysctl_jiffies;
68596 extern ctl_handler sysctl_ms_jiffies;
68597diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
68598index 9d68fed..71f02cc 100644
68599--- a/include/linux/sysfs.h
68600+++ b/include/linux/sysfs.h
68601@@ -75,8 +75,8 @@ struct bin_attribute {
68602 };
68603
68604 struct sysfs_ops {
68605- ssize_t (*show)(struct kobject *, struct attribute *,char *);
68606- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
68607+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
68608+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
68609 };
68610
68611 struct sysfs_dirent;
68612diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
68613index a8cc4e1..98d3b85 100644
68614--- a/include/linux/thread_info.h
68615+++ b/include/linux/thread_info.h
68616@@ -23,7 +23,7 @@ struct restart_block {
68617 };
68618 /* For futex_wait and futex_wait_requeue_pi */
68619 struct {
68620- u32 *uaddr;
68621+ u32 __user *uaddr;
68622 u32 val;
68623 u32 flags;
68624 u32 bitset;
68625diff --git a/include/linux/tty.h b/include/linux/tty.h
68626index e9c57e9..ee6d489 100644
68627--- a/include/linux/tty.h
68628+++ b/include/linux/tty.h
68629@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
68630 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
68631 extern void tty_ldisc_enable(struct tty_struct *tty);
68632
68633-
68634 /* n_tty.c */
68635 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
68636
68637diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
68638index 0c4ee9b..9f7c426 100644
68639--- a/include/linux/tty_ldisc.h
68640+++ b/include/linux/tty_ldisc.h
68641@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
68642
68643 struct module *owner;
68644
68645- int refcount;
68646+ atomic_t refcount;
68647 };
68648
68649 struct tty_ldisc {
68650diff --git a/include/linux/types.h b/include/linux/types.h
68651index c42724f..d190eee 100644
68652--- a/include/linux/types.h
68653+++ b/include/linux/types.h
68654@@ -191,10 +191,26 @@ typedef struct {
68655 volatile int counter;
68656 } atomic_t;
68657
68658+#ifdef CONFIG_PAX_REFCOUNT
68659+typedef struct {
68660+ volatile int counter;
68661+} atomic_unchecked_t;
68662+#else
68663+typedef atomic_t atomic_unchecked_t;
68664+#endif
68665+
68666 #ifdef CONFIG_64BIT
68667 typedef struct {
68668 volatile long counter;
68669 } atomic64_t;
68670+
68671+#ifdef CONFIG_PAX_REFCOUNT
68672+typedef struct {
68673+ volatile long counter;
68674+} atomic64_unchecked_t;
68675+#else
68676+typedef atomic64_t atomic64_unchecked_t;
68677+#endif
68678 #endif
68679
68680 struct ustat {
68681diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
68682index 6b58367..53a3e8e 100644
68683--- a/include/linux/uaccess.h
68684+++ b/include/linux/uaccess.h
68685@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
68686 long ret; \
68687 mm_segment_t old_fs = get_fs(); \
68688 \
68689- set_fs(KERNEL_DS); \
68690 pagefault_disable(); \
68691- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
68692- pagefault_enable(); \
68693+ set_fs(KERNEL_DS); \
68694+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
68695 set_fs(old_fs); \
68696+ pagefault_enable(); \
68697 ret; \
68698 })
68699
68700@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
68701 * Safely read from address @src to the buffer at @dst. If a kernel fault
68702 * happens, handle that and return -EFAULT.
68703 */
68704-extern long probe_kernel_read(void *dst, void *src, size_t size);
68705+extern long probe_kernel_read(void *dst, const void *src, size_t size);
68706
68707 /*
68708 * probe_kernel_write(): safely attempt to write to a location
68709@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
68710 * Safely write to address @dst from the buffer at @src. If a kernel fault
68711 * happens, handle that and return -EFAULT.
68712 */
68713-extern long probe_kernel_write(void *dst, void *src, size_t size);
68714+extern long probe_kernel_write(void *dst, const void *src, size_t size);
68715
68716 #endif /* __LINUX_UACCESS_H__ */
68717diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
68718index 99c1b4d..bb94261 100644
68719--- a/include/linux/unaligned/access_ok.h
68720+++ b/include/linux/unaligned/access_ok.h
68721@@ -6,32 +6,32 @@
68722
68723 static inline u16 get_unaligned_le16(const void *p)
68724 {
68725- return le16_to_cpup((__le16 *)p);
68726+ return le16_to_cpup((const __le16 *)p);
68727 }
68728
68729 static inline u32 get_unaligned_le32(const void *p)
68730 {
68731- return le32_to_cpup((__le32 *)p);
68732+ return le32_to_cpup((const __le32 *)p);
68733 }
68734
68735 static inline u64 get_unaligned_le64(const void *p)
68736 {
68737- return le64_to_cpup((__le64 *)p);
68738+ return le64_to_cpup((const __le64 *)p);
68739 }
68740
68741 static inline u16 get_unaligned_be16(const void *p)
68742 {
68743- return be16_to_cpup((__be16 *)p);
68744+ return be16_to_cpup((const __be16 *)p);
68745 }
68746
68747 static inline u32 get_unaligned_be32(const void *p)
68748 {
68749- return be32_to_cpup((__be32 *)p);
68750+ return be32_to_cpup((const __be32 *)p);
68751 }
68752
68753 static inline u64 get_unaligned_be64(const void *p)
68754 {
68755- return be64_to_cpup((__be64 *)p);
68756+ return be64_to_cpup((const __be64 *)p);
68757 }
68758
68759 static inline void put_unaligned_le16(u16 val, void *p)
68760diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
68761index 79b9837..b5a56f9 100644
68762--- a/include/linux/vermagic.h
68763+++ b/include/linux/vermagic.h
68764@@ -26,9 +26,35 @@
68765 #define MODULE_ARCH_VERMAGIC ""
68766 #endif
68767
68768+#ifdef CONFIG_PAX_REFCOUNT
68769+#define MODULE_PAX_REFCOUNT "REFCOUNT "
68770+#else
68771+#define MODULE_PAX_REFCOUNT ""
68772+#endif
68773+
68774+#ifdef CONSTIFY_PLUGIN
68775+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
68776+#else
68777+#define MODULE_CONSTIFY_PLUGIN ""
68778+#endif
68779+
68780+#ifdef STACKLEAK_PLUGIN
68781+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
68782+#else
68783+#define MODULE_STACKLEAK_PLUGIN ""
68784+#endif
68785+
68786+#ifdef CONFIG_GRKERNSEC
68787+#define MODULE_GRSEC "GRSEC "
68788+#else
68789+#define MODULE_GRSEC ""
68790+#endif
68791+
68792 #define VERMAGIC_STRING \
68793 UTS_RELEASE " " \
68794 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
68795 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
68796- MODULE_ARCH_VERMAGIC
68797+ MODULE_ARCH_VERMAGIC \
68798+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
68799+ MODULE_GRSEC
68800
68801diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
68802index 819a634..462ac12 100644
68803--- a/include/linux/vmalloc.h
68804+++ b/include/linux/vmalloc.h
68805@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
68806 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
68807 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
68808 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
68809+
68810+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
68811+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
68812+#endif
68813+
68814 /* bits [20..32] reserved for arch specific ioremap internals */
68815
68816 /*
68817@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
68818
68819 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
68820
68821+#define vmalloc(x) \
68822+({ \
68823+ void *___retval; \
68824+ intoverflow_t ___x = (intoverflow_t)x; \
68825+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
68826+ ___retval = NULL; \
68827+ else \
68828+ ___retval = vmalloc((unsigned long)___x); \
68829+ ___retval; \
68830+})
68831+
68832+#define __vmalloc(x, y, z) \
68833+({ \
68834+ void *___retval; \
68835+ intoverflow_t ___x = (intoverflow_t)x; \
68836+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
68837+ ___retval = NULL; \
68838+ else \
68839+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
68840+ ___retval; \
68841+})
68842+
68843+#define vmalloc_user(x) \
68844+({ \
68845+ void *___retval; \
68846+ intoverflow_t ___x = (intoverflow_t)x; \
68847+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
68848+ ___retval = NULL; \
68849+ else \
68850+ ___retval = vmalloc_user((unsigned long)___x); \
68851+ ___retval; \
68852+})
68853+
68854+#define vmalloc_exec(x) \
68855+({ \
68856+ void *___retval; \
68857+ intoverflow_t ___x = (intoverflow_t)x; \
68858+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
68859+ ___retval = NULL; \
68860+ else \
68861+ ___retval = vmalloc_exec((unsigned long)___x); \
68862+ ___retval; \
68863+})
68864+
68865+#define vmalloc_node(x, y) \
68866+({ \
68867+ void *___retval; \
68868+ intoverflow_t ___x = (intoverflow_t)x; \
68869+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
68870+ ___retval = NULL; \
68871+ else \
68872+ ___retval = vmalloc_node((unsigned long)___x, (y));\
68873+ ___retval; \
68874+})
68875+
68876+#define vmalloc_32(x) \
68877+({ \
68878+ void *___retval; \
68879+ intoverflow_t ___x = (intoverflow_t)x; \
68880+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
68881+ ___retval = NULL; \
68882+ else \
68883+ ___retval = vmalloc_32((unsigned long)___x); \
68884+ ___retval; \
68885+})
68886+
68887+#define vmalloc_32_user(x) \
68888+({ \
68889+ void *___retval; \
68890+ intoverflow_t ___x = (intoverflow_t)x; \
68891+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
68892+ ___retval = NULL; \
68893+ else \
68894+ ___retval = vmalloc_32_user((unsigned long)___x);\
68895+ ___retval; \
68896+})
68897+
68898 #endif /* _LINUX_VMALLOC_H */
68899diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
68900index 13070d6..aa4159a 100644
68901--- a/include/linux/vmstat.h
68902+++ b/include/linux/vmstat.h
68903@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
68904 /*
68905 * Zone based page accounting with per cpu differentials.
68906 */
68907-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68908+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68909
68910 static inline void zone_page_state_add(long x, struct zone *zone,
68911 enum zone_stat_item item)
68912 {
68913- atomic_long_add(x, &zone->vm_stat[item]);
68914- atomic_long_add(x, &vm_stat[item]);
68915+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
68916+ atomic_long_add_unchecked(x, &vm_stat[item]);
68917 }
68918
68919 static inline unsigned long global_page_state(enum zone_stat_item item)
68920 {
68921- long x = atomic_long_read(&vm_stat[item]);
68922+ long x = atomic_long_read_unchecked(&vm_stat[item]);
68923 #ifdef CONFIG_SMP
68924 if (x < 0)
68925 x = 0;
68926@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
68927 static inline unsigned long zone_page_state(struct zone *zone,
68928 enum zone_stat_item item)
68929 {
68930- long x = atomic_long_read(&zone->vm_stat[item]);
68931+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
68932 #ifdef CONFIG_SMP
68933 if (x < 0)
68934 x = 0;
68935@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
68936 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
68937 enum zone_stat_item item)
68938 {
68939- long x = atomic_long_read(&zone->vm_stat[item]);
68940+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
68941
68942 #ifdef CONFIG_SMP
68943 int cpu;
68944@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
68945
68946 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
68947 {
68948- atomic_long_inc(&zone->vm_stat[item]);
68949- atomic_long_inc(&vm_stat[item]);
68950+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
68951+ atomic_long_inc_unchecked(&vm_stat[item]);
68952 }
68953
68954 static inline void __inc_zone_page_state(struct page *page,
68955@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
68956
68957 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
68958 {
68959- atomic_long_dec(&zone->vm_stat[item]);
68960- atomic_long_dec(&vm_stat[item]);
68961+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
68962+ atomic_long_dec_unchecked(&vm_stat[item]);
68963 }
68964
68965 static inline void __dec_zone_page_state(struct page *page,
68966diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
68967index eed5fcc..5080d24 100644
68968--- a/include/media/saa7146_vv.h
68969+++ b/include/media/saa7146_vv.h
68970@@ -167,7 +167,7 @@ struct saa7146_ext_vv
68971 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
68972
68973 /* the extension can override this */
68974- struct v4l2_ioctl_ops ops;
68975+ v4l2_ioctl_ops_no_const ops;
68976 /* pointer to the saa7146 core ops */
68977 const struct v4l2_ioctl_ops *core_ops;
68978
68979diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
68980index 73c9867..2da8837 100644
68981--- a/include/media/v4l2-dev.h
68982+++ b/include/media/v4l2-dev.h
68983@@ -34,7 +34,7 @@ struct v4l2_device;
68984 #define V4L2_FL_UNREGISTERED (0)
68985
68986 struct v4l2_file_operations {
68987- struct module *owner;
68988+ struct module * const owner;
68989 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
68990 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
68991 unsigned int (*poll) (struct file *, struct poll_table_struct *);
68992@@ -46,6 +46,7 @@ struct v4l2_file_operations {
68993 int (*open) (struct file *);
68994 int (*release) (struct file *);
68995 };
68996+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
68997
68998 /*
68999 * Newer version of video_device, handled by videodev2.c
69000diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
69001index 5d5d550..f559ef1 100644
69002--- a/include/media/v4l2-device.h
69003+++ b/include/media/v4l2-device.h
69004@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
69005 this function returns 0. If the name ends with a digit (e.g. cx18),
69006 then the name will be set to cx18-0 since cx180 looks really odd. */
69007 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
69008- atomic_t *instance);
69009+ atomic_unchecked_t *instance);
69010
69011 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
69012 Since the parent disappears this ensures that v4l2_dev doesn't have an
69013diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
69014index 7a4529d..7244290 100644
69015--- a/include/media/v4l2-ioctl.h
69016+++ b/include/media/v4l2-ioctl.h
69017@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
69018 long (*vidioc_default) (struct file *file, void *fh,
69019 int cmd, void *arg);
69020 };
69021+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
69022
69023
69024 /* v4l debugging and diagnostics */
69025diff --git a/include/net/flow.h b/include/net/flow.h
69026index 809970b..c3df4f3 100644
69027--- a/include/net/flow.h
69028+++ b/include/net/flow.h
69029@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
69030 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
69031 u8 dir, flow_resolve_t resolver);
69032 extern void flow_cache_flush(void);
69033-extern atomic_t flow_cache_genid;
69034+extern atomic_unchecked_t flow_cache_genid;
69035
69036 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
69037 {
69038diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
69039index 15e1f8fe..668837c 100644
69040--- a/include/net/inetpeer.h
69041+++ b/include/net/inetpeer.h
69042@@ -24,7 +24,7 @@ struct inet_peer
69043 __u32 dtime; /* the time of last use of not
69044 * referenced entries */
69045 atomic_t refcnt;
69046- atomic_t rid; /* Frag reception counter */
69047+ atomic_unchecked_t rid; /* Frag reception counter */
69048 __u32 tcp_ts;
69049 unsigned long tcp_ts_stamp;
69050 };
69051diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
69052index 98978e7..2243a3d 100644
69053--- a/include/net/ip_vs.h
69054+++ b/include/net/ip_vs.h
69055@@ -365,7 +365,7 @@ struct ip_vs_conn {
69056 struct ip_vs_conn *control; /* Master control connection */
69057 atomic_t n_control; /* Number of controlled ones */
69058 struct ip_vs_dest *dest; /* real server */
69059- atomic_t in_pkts; /* incoming packet counter */
69060+ atomic_unchecked_t in_pkts; /* incoming packet counter */
69061
69062 /* packet transmitter for different forwarding methods. If it
69063 mangles the packet, it must return NF_DROP or better NF_STOLEN,
69064@@ -466,7 +466,7 @@ struct ip_vs_dest {
69065 union nf_inet_addr addr; /* IP address of the server */
69066 __be16 port; /* port number of the server */
69067 volatile unsigned flags; /* dest status flags */
69068- atomic_t conn_flags; /* flags to copy to conn */
69069+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
69070 atomic_t weight; /* server weight */
69071
69072 atomic_t refcnt; /* reference counter */
69073diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
69074index 69b610a..fe3962c 100644
69075--- a/include/net/irda/ircomm_core.h
69076+++ b/include/net/irda/ircomm_core.h
69077@@ -51,7 +51,7 @@ typedef struct {
69078 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
69079 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
69080 struct ircomm_info *);
69081-} call_t;
69082+} __no_const call_t;
69083
69084 struct ircomm_cb {
69085 irda_queue_t queue;
69086diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
69087index eea2e61..08c692d 100644
69088--- a/include/net/irda/ircomm_tty.h
69089+++ b/include/net/irda/ircomm_tty.h
69090@@ -35,6 +35,7 @@
69091 #include <linux/termios.h>
69092 #include <linux/timer.h>
69093 #include <linux/tty.h> /* struct tty_struct */
69094+#include <asm/local.h>
69095
69096 #include <net/irda/irias_object.h>
69097 #include <net/irda/ircomm_core.h>
69098@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
69099 unsigned short close_delay;
69100 unsigned short closing_wait; /* time to wait before closing */
69101
69102- int open_count;
69103- int blocked_open; /* # of blocked opens */
69104+ local_t open_count;
69105+ local_t blocked_open; /* # of blocked opens */
69106
69107 /* Protect concurent access to :
69108 * o self->open_count
69109diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
69110index f82a1e8..82d81e8 100644
69111--- a/include/net/iucv/af_iucv.h
69112+++ b/include/net/iucv/af_iucv.h
69113@@ -87,7 +87,7 @@ struct iucv_sock {
69114 struct iucv_sock_list {
69115 struct hlist_head head;
69116 rwlock_t lock;
69117- atomic_t autobind_name;
69118+ atomic_unchecked_t autobind_name;
69119 };
69120
69121 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
69122diff --git a/include/net/lapb.h b/include/net/lapb.h
69123index 96cb5dd..25e8d4f 100644
69124--- a/include/net/lapb.h
69125+++ b/include/net/lapb.h
69126@@ -95,7 +95,7 @@ struct lapb_cb {
69127 struct sk_buff_head write_queue;
69128 struct sk_buff_head ack_queue;
69129 unsigned char window;
69130- struct lapb_register_struct callbacks;
69131+ struct lapb_register_struct *callbacks;
69132
69133 /* FRMR control information */
69134 struct lapb_frame frmr_data;
69135diff --git a/include/net/neighbour.h b/include/net/neighbour.h
69136index 3817fda..cdb2343 100644
69137--- a/include/net/neighbour.h
69138+++ b/include/net/neighbour.h
69139@@ -131,7 +131,7 @@ struct neigh_ops
69140 int (*connected_output)(struct sk_buff*);
69141 int (*hh_output)(struct sk_buff*);
69142 int (*queue_xmit)(struct sk_buff*);
69143-};
69144+} __do_const;
69145
69146 struct pneigh_entry
69147 {
69148diff --git a/include/net/netlink.h b/include/net/netlink.h
69149index c344646..4778c71 100644
69150--- a/include/net/netlink.h
69151+++ b/include/net/netlink.h
69152@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
69153 {
69154 return (remaining >= (int) sizeof(struct nlmsghdr) &&
69155 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
69156- nlh->nlmsg_len <= remaining);
69157+ nlh->nlmsg_len <= (unsigned int)remaining);
69158 }
69159
69160 /**
69161@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
69162 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
69163 {
69164 if (mark)
69165- skb_trim(skb, (unsigned char *) mark - skb->data);
69166+ skb_trim(skb, (const unsigned char *) mark - skb->data);
69167 }
69168
69169 /**
69170diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
69171index 9a4b8b7..e49e077 100644
69172--- a/include/net/netns/ipv4.h
69173+++ b/include/net/netns/ipv4.h
69174@@ -54,7 +54,7 @@ struct netns_ipv4 {
69175 int current_rt_cache_rebuild_count;
69176
69177 struct timer_list rt_secret_timer;
69178- atomic_t rt_genid;
69179+ atomic_unchecked_t rt_genid;
69180
69181 #ifdef CONFIG_IP_MROUTE
69182 struct sock *mroute_sk;
69183diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
69184index 8a6d529..171f401 100644
69185--- a/include/net/sctp/sctp.h
69186+++ b/include/net/sctp/sctp.h
69187@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
69188
69189 #else /* SCTP_DEBUG */
69190
69191-#define SCTP_DEBUG_PRINTK(whatever...)
69192-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
69193+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
69194+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
69195 #define SCTP_ENABLE_DEBUG
69196 #define SCTP_DISABLE_DEBUG
69197 #define SCTP_ASSERT(expr, str, func)
69198diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
69199index d97f689..f3b90ab 100644
69200--- a/include/net/secure_seq.h
69201+++ b/include/net/secure_seq.h
69202@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
69203 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
69204 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
69205 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
69206- __be16 dport);
69207+ __be16 dport);
69208 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
69209 __be16 sport, __be16 dport);
69210 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69211- __be16 sport, __be16 dport);
69212+ __be16 sport, __be16 dport);
69213 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
69214- __be16 sport, __be16 dport);
69215+ __be16 sport, __be16 dport);
69216 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69217- __be16 sport, __be16 dport);
69218+ __be16 sport, __be16 dport);
69219
69220 #endif /* _NET_SECURE_SEQ */
69221diff --git a/include/net/sock.h b/include/net/sock.h
69222index 9f96394..76fc9c7 100644
69223--- a/include/net/sock.h
69224+++ b/include/net/sock.h
69225@@ -272,7 +272,7 @@ struct sock {
69226 rwlock_t sk_callback_lock;
69227 int sk_err,
69228 sk_err_soft;
69229- atomic_t sk_drops;
69230+ atomic_unchecked_t sk_drops;
69231 unsigned short sk_ack_backlog;
69232 unsigned short sk_max_ack_backlog;
69233 __u32 sk_priority;
69234@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
69235 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
69236 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
69237 #else
69238-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
69239+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
69240 int inc)
69241 {
69242 }
69243diff --git a/include/net/tcp.h b/include/net/tcp.h
69244index 6cfe18b..dd21acb 100644
69245--- a/include/net/tcp.h
69246+++ b/include/net/tcp.h
69247@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
69248 struct tcp_seq_afinfo {
69249 char *name;
69250 sa_family_t family;
69251- struct file_operations seq_fops;
69252- struct seq_operations seq_ops;
69253+ file_operations_no_const seq_fops;
69254+ seq_operations_no_const seq_ops;
69255 };
69256
69257 struct tcp_iter_state {
69258diff --git a/include/net/udp.h b/include/net/udp.h
69259index f98abd2..b4b042f 100644
69260--- a/include/net/udp.h
69261+++ b/include/net/udp.h
69262@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
69263 char *name;
69264 sa_family_t family;
69265 struct udp_table *udp_table;
69266- struct file_operations seq_fops;
69267- struct seq_operations seq_ops;
69268+ file_operations_no_const seq_fops;
69269+ seq_operations_no_const seq_ops;
69270 };
69271
69272 struct udp_iter_state {
69273diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
69274index cbb822e..e9c1cbe 100644
69275--- a/include/rdma/iw_cm.h
69276+++ b/include/rdma/iw_cm.h
69277@@ -129,7 +129,7 @@ struct iw_cm_verbs {
69278 int backlog);
69279
69280 int (*destroy_listen)(struct iw_cm_id *cm_id);
69281-};
69282+} __no_const;
69283
69284 /**
69285 * iw_create_cm_id - Create an IW CM identifier.
69286diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
69287index 09a124b..caa8ca8 100644
69288--- a/include/scsi/libfc.h
69289+++ b/include/scsi/libfc.h
69290@@ -675,6 +675,7 @@ struct libfc_function_template {
69291 */
69292 void (*disc_stop_final) (struct fc_lport *);
69293 };
69294+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
69295
69296 /* information used by the discovery layer */
69297 struct fc_disc {
69298@@ -707,7 +708,7 @@ struct fc_lport {
69299 struct fc_disc disc;
69300
69301 /* Operational Information */
69302- struct libfc_function_template tt;
69303+ libfc_function_template_no_const tt;
69304 u8 link_up;
69305 u8 qfull;
69306 enum fc_lport_state state;
69307diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
69308index de8e180..f15e0d7 100644
69309--- a/include/scsi/scsi_device.h
69310+++ b/include/scsi/scsi_device.h
69311@@ -156,9 +156,9 @@ struct scsi_device {
69312 unsigned int max_device_blocked; /* what device_blocked counts down from */
69313 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
69314
69315- atomic_t iorequest_cnt;
69316- atomic_t iodone_cnt;
69317- atomic_t ioerr_cnt;
69318+ atomic_unchecked_t iorequest_cnt;
69319+ atomic_unchecked_t iodone_cnt;
69320+ atomic_unchecked_t ioerr_cnt;
69321
69322 struct device sdev_gendev,
69323 sdev_dev;
69324diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
69325index fc50bd6..81ba9cb 100644
69326--- a/include/scsi/scsi_transport_fc.h
69327+++ b/include/scsi/scsi_transport_fc.h
69328@@ -708,7 +708,7 @@ struct fc_function_template {
69329 unsigned long show_host_system_hostname:1;
69330
69331 unsigned long disable_target_scan:1;
69332-};
69333+} __do_const;
69334
69335
69336 /**
69337diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
69338index 3dae3f7..8440d6f 100644
69339--- a/include/sound/ac97_codec.h
69340+++ b/include/sound/ac97_codec.h
69341@@ -419,15 +419,15 @@
69342 struct snd_ac97;
69343
69344 struct snd_ac97_build_ops {
69345- int (*build_3d) (struct snd_ac97 *ac97);
69346- int (*build_specific) (struct snd_ac97 *ac97);
69347- int (*build_spdif) (struct snd_ac97 *ac97);
69348- int (*build_post_spdif) (struct snd_ac97 *ac97);
69349+ int (* const build_3d) (struct snd_ac97 *ac97);
69350+ int (* const build_specific) (struct snd_ac97 *ac97);
69351+ int (* const build_spdif) (struct snd_ac97 *ac97);
69352+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
69353 #ifdef CONFIG_PM
69354- void (*suspend) (struct snd_ac97 *ac97);
69355- void (*resume) (struct snd_ac97 *ac97);
69356+ void (* const suspend) (struct snd_ac97 *ac97);
69357+ void (* const resume) (struct snd_ac97 *ac97);
69358 #endif
69359- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69360+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69361 };
69362
69363 struct snd_ac97_bus_ops {
69364@@ -477,7 +477,7 @@ struct snd_ac97_template {
69365
69366 struct snd_ac97 {
69367 /* -- lowlevel (hardware) driver specific -- */
69368- struct snd_ac97_build_ops * build_ops;
69369+ const struct snd_ac97_build_ops * build_ops;
69370 void *private_data;
69371 void (*private_free) (struct snd_ac97 *ac97);
69372 /* --- */
69373diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
69374index 891cf1a..a94ba2b 100644
69375--- a/include/sound/ak4xxx-adda.h
69376+++ b/include/sound/ak4xxx-adda.h
69377@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
69378 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
69379 unsigned char val);
69380 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69381-};
69382+} __no_const;
69383
69384 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
69385
69386diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
69387index 8c05e47..2b5df97 100644
69388--- a/include/sound/hwdep.h
69389+++ b/include/sound/hwdep.h
69390@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
69391 struct snd_hwdep_dsp_status *status);
69392 int (*dsp_load)(struct snd_hwdep *hw,
69393 struct snd_hwdep_dsp_image *image);
69394-};
69395+} __no_const;
69396
69397 struct snd_hwdep {
69398 struct snd_card *card;
69399diff --git a/include/sound/info.h b/include/sound/info.h
69400index 112e894..6fda5b5 100644
69401--- a/include/sound/info.h
69402+++ b/include/sound/info.h
69403@@ -44,7 +44,7 @@ struct snd_info_entry_text {
69404 struct snd_info_buffer *buffer);
69405 void (*write)(struct snd_info_entry *entry,
69406 struct snd_info_buffer *buffer);
69407-};
69408+} __no_const;
69409
69410 struct snd_info_entry_ops {
69411 int (*open)(struct snd_info_entry *entry,
69412diff --git a/include/sound/pcm.h b/include/sound/pcm.h
69413index de6d981..590a550 100644
69414--- a/include/sound/pcm.h
69415+++ b/include/sound/pcm.h
69416@@ -80,6 +80,7 @@ struct snd_pcm_ops {
69417 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
69418 int (*ack)(struct snd_pcm_substream *substream);
69419 };
69420+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
69421
69422 /*
69423 *
69424diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
69425index 736eac7..fe8a80f 100644
69426--- a/include/sound/sb16_csp.h
69427+++ b/include/sound/sb16_csp.h
69428@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
69429 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
69430 int (*csp_stop) (struct snd_sb_csp * p);
69431 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
69432-};
69433+} __no_const;
69434
69435 /*
69436 * CSP private data
69437diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
69438index 444cd6b..3327cc5 100644
69439--- a/include/sound/ymfpci.h
69440+++ b/include/sound/ymfpci.h
69441@@ -358,7 +358,7 @@ struct snd_ymfpci {
69442 spinlock_t reg_lock;
69443 spinlock_t voice_lock;
69444 wait_queue_head_t interrupt_sleep;
69445- atomic_t interrupt_sleep_count;
69446+ atomic_unchecked_t interrupt_sleep_count;
69447 struct snd_info_entry *proc_entry;
69448 const struct firmware *dsp_microcode;
69449 const struct firmware *controller_microcode;
69450diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
69451index b89f9db..f097b38 100644
69452--- a/include/trace/events/irq.h
69453+++ b/include/trace/events/irq.h
69454@@ -34,7 +34,7 @@
69455 */
69456 TRACE_EVENT(irq_handler_entry,
69457
69458- TP_PROTO(int irq, struct irqaction *action),
69459+ TP_PROTO(int irq, const struct irqaction *action),
69460
69461 TP_ARGS(irq, action),
69462
69463@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
69464 */
69465 TRACE_EVENT(irq_handler_exit,
69466
69467- TP_PROTO(int irq, struct irqaction *action, int ret),
69468+ TP_PROTO(int irq, const struct irqaction *action, int ret),
69469
69470 TP_ARGS(irq, action, ret),
69471
69472@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
69473 */
69474 TRACE_EVENT(softirq_entry,
69475
69476- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69477+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69478
69479 TP_ARGS(h, vec),
69480
69481@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
69482 */
69483 TRACE_EVENT(softirq_exit,
69484
69485- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69486+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69487
69488 TP_ARGS(h, vec),
69489
69490diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
69491index 0993a22..32ba2fe 100644
69492--- a/include/video/uvesafb.h
69493+++ b/include/video/uvesafb.h
69494@@ -177,6 +177,7 @@ struct uvesafb_par {
69495 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
69496 u8 pmi_setpal; /* PMI for palette changes */
69497 u16 *pmi_base; /* protected mode interface location */
69498+ u8 *pmi_code; /* protected mode code location */
69499 void *pmi_start;
69500 void *pmi_pal;
69501 u8 *vbe_state_orig; /*
69502diff --git a/init/Kconfig b/init/Kconfig
69503index d72691b..3996e54 100644
69504--- a/init/Kconfig
69505+++ b/init/Kconfig
69506@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
69507
69508 config COMPAT_BRK
69509 bool "Disable heap randomization"
69510- default y
69511+ default n
69512 help
69513 Randomizing heap placement makes heap exploits harder, but it
69514 also breaks ancient binaries (including anything libc5 based).
69515diff --git a/init/do_mounts.c b/init/do_mounts.c
69516index bb008d0..4fa3933 100644
69517--- a/init/do_mounts.c
69518+++ b/init/do_mounts.c
69519@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
69520
69521 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
69522 {
69523- int err = sys_mount(name, "/root", fs, flags, data);
69524+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
69525 if (err)
69526 return err;
69527
69528- sys_chdir("/root");
69529+ sys_chdir((__force const char __user *)"/root");
69530 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
69531 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
69532 current->fs->pwd.mnt->mnt_sb->s_type->name,
69533@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
69534 va_start(args, fmt);
69535 vsprintf(buf, fmt, args);
69536 va_end(args);
69537- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
69538+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
69539 if (fd >= 0) {
69540 sys_ioctl(fd, FDEJECT, 0);
69541 sys_close(fd);
69542 }
69543 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
69544- fd = sys_open("/dev/console", O_RDWR, 0);
69545+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
69546 if (fd >= 0) {
69547 sys_ioctl(fd, TCGETS, (long)&termios);
69548 termios.c_lflag &= ~ICANON;
69549 sys_ioctl(fd, TCSETSF, (long)&termios);
69550- sys_read(fd, &c, 1);
69551+ sys_read(fd, (char __user *)&c, 1);
69552 termios.c_lflag |= ICANON;
69553 sys_ioctl(fd, TCSETSF, (long)&termios);
69554 sys_close(fd);
69555@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
69556 mount_root();
69557 out:
69558 devtmpfs_mount("dev");
69559- sys_mount(".", "/", NULL, MS_MOVE, NULL);
69560- sys_chroot(".");
69561+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
69562+ sys_chroot((__force char __user *)".");
69563 }
69564diff --git a/init/do_mounts.h b/init/do_mounts.h
69565index f5b978a..69dbfe8 100644
69566--- a/init/do_mounts.h
69567+++ b/init/do_mounts.h
69568@@ -15,15 +15,15 @@ extern int root_mountflags;
69569
69570 static inline int create_dev(char *name, dev_t dev)
69571 {
69572- sys_unlink(name);
69573- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
69574+ sys_unlink((char __force_user *)name);
69575+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
69576 }
69577
69578 #if BITS_PER_LONG == 32
69579 static inline u32 bstat(char *name)
69580 {
69581 struct stat64 stat;
69582- if (sys_stat64(name, &stat) != 0)
69583+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
69584 return 0;
69585 if (!S_ISBLK(stat.st_mode))
69586 return 0;
69587@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
69588 static inline u32 bstat(char *name)
69589 {
69590 struct stat stat;
69591- if (sys_newstat(name, &stat) != 0)
69592+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
69593 return 0;
69594 if (!S_ISBLK(stat.st_mode))
69595 return 0;
69596diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
69597index 614241b..4da046b 100644
69598--- a/init/do_mounts_initrd.c
69599+++ b/init/do_mounts_initrd.c
69600@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
69601 sys_close(old_fd);sys_close(root_fd);
69602 sys_close(0);sys_close(1);sys_close(2);
69603 sys_setsid();
69604- (void) sys_open("/dev/console",O_RDWR,0);
69605+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
69606 (void) sys_dup(0);
69607 (void) sys_dup(0);
69608 return kernel_execve(shell, argv, envp_init);
69609@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
69610 create_dev("/dev/root.old", Root_RAM0);
69611 /* mount initrd on rootfs' /root */
69612 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
69613- sys_mkdir("/old", 0700);
69614- root_fd = sys_open("/", 0, 0);
69615- old_fd = sys_open("/old", 0, 0);
69616+ sys_mkdir((const char __force_user *)"/old", 0700);
69617+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
69618+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
69619 /* move initrd over / and chdir/chroot in initrd root */
69620- sys_chdir("/root");
69621- sys_mount(".", "/", NULL, MS_MOVE, NULL);
69622- sys_chroot(".");
69623+ sys_chdir((const char __force_user *)"/root");
69624+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
69625+ sys_chroot((const char __force_user *)".");
69626
69627 /*
69628 * In case that a resume from disk is carried out by linuxrc or one of
69629@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
69630
69631 /* move initrd to rootfs' /old */
69632 sys_fchdir(old_fd);
69633- sys_mount("/", ".", NULL, MS_MOVE, NULL);
69634+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
69635 /* switch root and cwd back to / of rootfs */
69636 sys_fchdir(root_fd);
69637- sys_chroot(".");
69638+ sys_chroot((const char __force_user *)".");
69639 sys_close(old_fd);
69640 sys_close(root_fd);
69641
69642 if (new_decode_dev(real_root_dev) == Root_RAM0) {
69643- sys_chdir("/old");
69644+ sys_chdir((const char __force_user *)"/old");
69645 return;
69646 }
69647
69648@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
69649 mount_root();
69650
69651 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
69652- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
69653+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
69654 if (!error)
69655 printk("okay\n");
69656 else {
69657- int fd = sys_open("/dev/root.old", O_RDWR, 0);
69658+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
69659 if (error == -ENOENT)
69660 printk("/initrd does not exist. Ignored.\n");
69661 else
69662 printk("failed\n");
69663 printk(KERN_NOTICE "Unmounting old root\n");
69664- sys_umount("/old", MNT_DETACH);
69665+ sys_umount((char __force_user *)"/old", MNT_DETACH);
69666 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
69667 if (fd < 0) {
69668 error = fd;
69669@@ -119,11 +119,11 @@ int __init initrd_load(void)
69670 * mounted in the normal path.
69671 */
69672 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
69673- sys_unlink("/initrd.image");
69674+ sys_unlink((const char __force_user *)"/initrd.image");
69675 handle_initrd();
69676 return 1;
69677 }
69678 }
69679- sys_unlink("/initrd.image");
69680+ sys_unlink((const char __force_user *)"/initrd.image");
69681 return 0;
69682 }
69683diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
69684index 69aebbf..c0bf6a7 100644
69685--- a/init/do_mounts_md.c
69686+++ b/init/do_mounts_md.c
69687@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
69688 partitioned ? "_d" : "", minor,
69689 md_setup_args[ent].device_names);
69690
69691- fd = sys_open(name, 0, 0);
69692+ fd = sys_open((char __force_user *)name, 0, 0);
69693 if (fd < 0) {
69694 printk(KERN_ERR "md: open failed - cannot start "
69695 "array %s\n", name);
69696@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
69697 * array without it
69698 */
69699 sys_close(fd);
69700- fd = sys_open(name, 0, 0);
69701+ fd = sys_open((char __force_user *)name, 0, 0);
69702 sys_ioctl(fd, BLKRRPART, 0);
69703 }
69704 sys_close(fd);
69705@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
69706
69707 wait_for_device_probe();
69708
69709- fd = sys_open("/dev/md0", 0, 0);
69710+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
69711 if (fd >= 0) {
69712 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
69713 sys_close(fd);
69714diff --git a/init/initramfs.c b/init/initramfs.c
69715index 1fd59b8..a01b079 100644
69716--- a/init/initramfs.c
69717+++ b/init/initramfs.c
69718@@ -74,7 +74,7 @@ static void __init free_hash(void)
69719 }
69720 }
69721
69722-static long __init do_utime(char __user *filename, time_t mtime)
69723+static long __init do_utime(__force char __user *filename, time_t mtime)
69724 {
69725 struct timespec t[2];
69726
69727@@ -109,7 +109,7 @@ static void __init dir_utime(void)
69728 struct dir_entry *de, *tmp;
69729 list_for_each_entry_safe(de, tmp, &dir_list, list) {
69730 list_del(&de->list);
69731- do_utime(de->name, de->mtime);
69732+ do_utime((char __force_user *)de->name, de->mtime);
69733 kfree(de->name);
69734 kfree(de);
69735 }
69736@@ -271,7 +271,7 @@ static int __init maybe_link(void)
69737 if (nlink >= 2) {
69738 char *old = find_link(major, minor, ino, mode, collected);
69739 if (old)
69740- return (sys_link(old, collected) < 0) ? -1 : 1;
69741+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
69742 }
69743 return 0;
69744 }
69745@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
69746 {
69747 struct stat st;
69748
69749- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
69750+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
69751 if (S_ISDIR(st.st_mode))
69752- sys_rmdir(path);
69753+ sys_rmdir((char __force_user *)path);
69754 else
69755- sys_unlink(path);
69756+ sys_unlink((char __force_user *)path);
69757 }
69758 }
69759
69760@@ -305,7 +305,7 @@ static int __init do_name(void)
69761 int openflags = O_WRONLY|O_CREAT;
69762 if (ml != 1)
69763 openflags |= O_TRUNC;
69764- wfd = sys_open(collected, openflags, mode);
69765+ wfd = sys_open((char __force_user *)collected, openflags, mode);
69766
69767 if (wfd >= 0) {
69768 sys_fchown(wfd, uid, gid);
69769@@ -317,17 +317,17 @@ static int __init do_name(void)
69770 }
69771 }
69772 } else if (S_ISDIR(mode)) {
69773- sys_mkdir(collected, mode);
69774- sys_chown(collected, uid, gid);
69775- sys_chmod(collected, mode);
69776+ sys_mkdir((char __force_user *)collected, mode);
69777+ sys_chown((char __force_user *)collected, uid, gid);
69778+ sys_chmod((char __force_user *)collected, mode);
69779 dir_add(collected, mtime);
69780 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
69781 S_ISFIFO(mode) || S_ISSOCK(mode)) {
69782 if (maybe_link() == 0) {
69783- sys_mknod(collected, mode, rdev);
69784- sys_chown(collected, uid, gid);
69785- sys_chmod(collected, mode);
69786- do_utime(collected, mtime);
69787+ sys_mknod((char __force_user *)collected, mode, rdev);
69788+ sys_chown((char __force_user *)collected, uid, gid);
69789+ sys_chmod((char __force_user *)collected, mode);
69790+ do_utime((char __force_user *)collected, mtime);
69791 }
69792 }
69793 return 0;
69794@@ -336,15 +336,15 @@ static int __init do_name(void)
69795 static int __init do_copy(void)
69796 {
69797 if (count >= body_len) {
69798- sys_write(wfd, victim, body_len);
69799+ sys_write(wfd, (char __force_user *)victim, body_len);
69800 sys_close(wfd);
69801- do_utime(vcollected, mtime);
69802+ do_utime((char __force_user *)vcollected, mtime);
69803 kfree(vcollected);
69804 eat(body_len);
69805 state = SkipIt;
69806 return 0;
69807 } else {
69808- sys_write(wfd, victim, count);
69809+ sys_write(wfd, (char __force_user *)victim, count);
69810 body_len -= count;
69811 eat(count);
69812 return 1;
69813@@ -355,9 +355,9 @@ static int __init do_symlink(void)
69814 {
69815 collected[N_ALIGN(name_len) + body_len] = '\0';
69816 clean_path(collected, 0);
69817- sys_symlink(collected + N_ALIGN(name_len), collected);
69818- sys_lchown(collected, uid, gid);
69819- do_utime(collected, mtime);
69820+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
69821+ sys_lchown((char __force_user *)collected, uid, gid);
69822+ do_utime((char __force_user *)collected, mtime);
69823 state = SkipIt;
69824 next_state = Reset;
69825 return 0;
69826diff --git a/init/main.c b/init/main.c
69827index 1eb4bd5..da8c6f5 100644
69828--- a/init/main.c
69829+++ b/init/main.c
69830@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
69831 #ifdef CONFIG_TC
69832 extern void tc_init(void);
69833 #endif
69834+extern void grsecurity_init(void);
69835
69836 enum system_states system_state __read_mostly;
69837 EXPORT_SYMBOL(system_state);
69838@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
69839
69840 __setup("reset_devices", set_reset_devices);
69841
69842+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
69843+extern char pax_enter_kernel_user[];
69844+extern char pax_exit_kernel_user[];
69845+extern pgdval_t clone_pgd_mask;
69846+#endif
69847+
69848+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
69849+static int __init setup_pax_nouderef(char *str)
69850+{
69851+#ifdef CONFIG_X86_32
69852+ unsigned int cpu;
69853+ struct desc_struct *gdt;
69854+
69855+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
69856+ gdt = get_cpu_gdt_table(cpu);
69857+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
69858+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
69859+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
69860+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
69861+ }
69862+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
69863+#else
69864+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
69865+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
69866+ clone_pgd_mask = ~(pgdval_t)0UL;
69867+#endif
69868+
69869+ return 0;
69870+}
69871+early_param("pax_nouderef", setup_pax_nouderef);
69872+#endif
69873+
69874+#ifdef CONFIG_PAX_SOFTMODE
69875+int pax_softmode;
69876+
69877+static int __init setup_pax_softmode(char *str)
69878+{
69879+ get_option(&str, &pax_softmode);
69880+ return 1;
69881+}
69882+__setup("pax_softmode=", setup_pax_softmode);
69883+#endif
69884+
69885 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
69886 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
69887 static const char *panic_later, *panic_param;
69888@@ -705,52 +749,53 @@ int initcall_debug;
69889 core_param(initcall_debug, initcall_debug, bool, 0644);
69890
69891 static char msgbuf[64];
69892-static struct boot_trace_call call;
69893-static struct boot_trace_ret ret;
69894+static struct boot_trace_call trace_call;
69895+static struct boot_trace_ret trace_ret;
69896
69897 int do_one_initcall(initcall_t fn)
69898 {
69899 int count = preempt_count();
69900 ktime_t calltime, delta, rettime;
69901+ const char *msg1 = "", *msg2 = "";
69902
69903 if (initcall_debug) {
69904- call.caller = task_pid_nr(current);
69905- printk("calling %pF @ %i\n", fn, call.caller);
69906+ trace_call.caller = task_pid_nr(current);
69907+ printk("calling %pF @ %i\n", fn, trace_call.caller);
69908 calltime = ktime_get();
69909- trace_boot_call(&call, fn);
69910+ trace_boot_call(&trace_call, fn);
69911 enable_boot_trace();
69912 }
69913
69914- ret.result = fn();
69915+ trace_ret.result = fn();
69916
69917 if (initcall_debug) {
69918 disable_boot_trace();
69919 rettime = ktime_get();
69920 delta = ktime_sub(rettime, calltime);
69921- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
69922- trace_boot_ret(&ret, fn);
69923+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
69924+ trace_boot_ret(&trace_ret, fn);
69925 printk("initcall %pF returned %d after %Ld usecs\n", fn,
69926- ret.result, ret.duration);
69927+ trace_ret.result, trace_ret.duration);
69928 }
69929
69930 msgbuf[0] = 0;
69931
69932- if (ret.result && ret.result != -ENODEV && initcall_debug)
69933- sprintf(msgbuf, "error code %d ", ret.result);
69934+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
69935+ sprintf(msgbuf, "error code %d ", trace_ret.result);
69936
69937 if (preempt_count() != count) {
69938- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
69939+ msg1 = " preemption imbalance";
69940 preempt_count() = count;
69941 }
69942 if (irqs_disabled()) {
69943- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
69944+ msg2 = " disabled interrupts";
69945 local_irq_enable();
69946 }
69947- if (msgbuf[0]) {
69948- printk("initcall %pF returned with %s\n", fn, msgbuf);
69949+ if (msgbuf[0] || *msg1 || *msg2) {
69950+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
69951 }
69952
69953- return ret.result;
69954+ return trace_ret.result;
69955 }
69956
69957
69958@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
69959 if (!ramdisk_execute_command)
69960 ramdisk_execute_command = "/init";
69961
69962- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
69963+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
69964 ramdisk_execute_command = NULL;
69965 prepare_namespace();
69966 }
69967
69968+ grsecurity_init();
69969+
69970 /*
69971 * Ok, we have completed the initial bootup, and
69972 * we're essentially up and running. Get rid of the
69973diff --git a/init/noinitramfs.c b/init/noinitramfs.c
69974index f4c1a3a..96c19bd 100644
69975--- a/init/noinitramfs.c
69976+++ b/init/noinitramfs.c
69977@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
69978 {
69979 int err;
69980
69981- err = sys_mkdir("/dev", 0755);
69982+ err = sys_mkdir((const char __user *)"/dev", 0755);
69983 if (err < 0)
69984 goto out;
69985
69986@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
69987 if (err < 0)
69988 goto out;
69989
69990- err = sys_mkdir("/root", 0700);
69991+ err = sys_mkdir((const char __user *)"/root", 0700);
69992 if (err < 0)
69993 goto out;
69994
69995diff --git a/ipc/mqueue.c b/ipc/mqueue.c
69996index d01bc14..8df81db 100644
69997--- a/ipc/mqueue.c
69998+++ b/ipc/mqueue.c
69999@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
70000 mq_bytes = (mq_msg_tblsz +
70001 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
70002
70003+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
70004 spin_lock(&mq_lock);
70005 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
70006 u->mq_bytes + mq_bytes >
70007diff --git a/ipc/msg.c b/ipc/msg.c
70008index 779f762..4af9e36 100644
70009--- a/ipc/msg.c
70010+++ b/ipc/msg.c
70011@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
70012 return security_msg_queue_associate(msq, msgflg);
70013 }
70014
70015+static struct ipc_ops msg_ops = {
70016+ .getnew = newque,
70017+ .associate = msg_security,
70018+ .more_checks = NULL
70019+};
70020+
70021 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
70022 {
70023 struct ipc_namespace *ns;
70024- struct ipc_ops msg_ops;
70025 struct ipc_params msg_params;
70026
70027 ns = current->nsproxy->ipc_ns;
70028
70029- msg_ops.getnew = newque;
70030- msg_ops.associate = msg_security;
70031- msg_ops.more_checks = NULL;
70032-
70033 msg_params.key = key;
70034 msg_params.flg = msgflg;
70035
70036diff --git a/ipc/sem.c b/ipc/sem.c
70037index b781007..f738b04 100644
70038--- a/ipc/sem.c
70039+++ b/ipc/sem.c
70040@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
70041 return 0;
70042 }
70043
70044+static struct ipc_ops sem_ops = {
70045+ .getnew = newary,
70046+ .associate = sem_security,
70047+ .more_checks = sem_more_checks
70048+};
70049+
70050 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70051 {
70052 struct ipc_namespace *ns;
70053- struct ipc_ops sem_ops;
70054 struct ipc_params sem_params;
70055
70056 ns = current->nsproxy->ipc_ns;
70057@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70058 if (nsems < 0 || nsems > ns->sc_semmsl)
70059 return -EINVAL;
70060
70061- sem_ops.getnew = newary;
70062- sem_ops.associate = sem_security;
70063- sem_ops.more_checks = sem_more_checks;
70064-
70065 sem_params.key = key;
70066 sem_params.flg = semflg;
70067 sem_params.u.nsems = nsems;
70068@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
70069 ushort* sem_io = fast_sem_io;
70070 int nsems;
70071
70072+ pax_track_stack();
70073+
70074 sma = sem_lock_check(ns, semid);
70075 if (IS_ERR(sma))
70076 return PTR_ERR(sma);
70077@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
70078 unsigned long jiffies_left = 0;
70079 struct ipc_namespace *ns;
70080
70081+ pax_track_stack();
70082+
70083 ns = current->nsproxy->ipc_ns;
70084
70085 if (nsops < 1 || semid < 0)
70086diff --git a/ipc/shm.c b/ipc/shm.c
70087index d30732c..7379456 100644
70088--- a/ipc/shm.c
70089+++ b/ipc/shm.c
70090@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
70091 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
70092 #endif
70093
70094+#ifdef CONFIG_GRKERNSEC
70095+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70096+ const time_t shm_createtime, const uid_t cuid,
70097+ const int shmid);
70098+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70099+ const time_t shm_createtime);
70100+#endif
70101+
70102 void shm_init_ns(struct ipc_namespace *ns)
70103 {
70104 ns->shm_ctlmax = SHMMAX;
70105@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
70106 shp->shm_lprid = 0;
70107 shp->shm_atim = shp->shm_dtim = 0;
70108 shp->shm_ctim = get_seconds();
70109+#ifdef CONFIG_GRKERNSEC
70110+ {
70111+ struct timespec timeval;
70112+ do_posix_clock_monotonic_gettime(&timeval);
70113+
70114+ shp->shm_createtime = timeval.tv_sec;
70115+ }
70116+#endif
70117 shp->shm_segsz = size;
70118 shp->shm_nattch = 0;
70119 shp->shm_file = file;
70120@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
70121 return 0;
70122 }
70123
70124+static struct ipc_ops shm_ops = {
70125+ .getnew = newseg,
70126+ .associate = shm_security,
70127+ .more_checks = shm_more_checks
70128+};
70129+
70130 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
70131 {
70132 struct ipc_namespace *ns;
70133- struct ipc_ops shm_ops;
70134 struct ipc_params shm_params;
70135
70136 ns = current->nsproxy->ipc_ns;
70137
70138- shm_ops.getnew = newseg;
70139- shm_ops.associate = shm_security;
70140- shm_ops.more_checks = shm_more_checks;
70141-
70142 shm_params.key = key;
70143 shm_params.flg = shmflg;
70144 shm_params.u.size = size;
70145@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
70146 if (err)
70147 goto out_unlock;
70148
70149+#ifdef CONFIG_GRKERNSEC
70150+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
70151+ shp->shm_perm.cuid, shmid) ||
70152+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
70153+ err = -EACCES;
70154+ goto out_unlock;
70155+ }
70156+#endif
70157+
70158 path.dentry = dget(shp->shm_file->f_path.dentry);
70159 path.mnt = shp->shm_file->f_path.mnt;
70160 shp->shm_nattch++;
70161+#ifdef CONFIG_GRKERNSEC
70162+ shp->shm_lapid = current->pid;
70163+#endif
70164 size = i_size_read(path.dentry->d_inode);
70165 shm_unlock(shp);
70166
70167diff --git a/kernel/acct.c b/kernel/acct.c
70168index a6605ca..ca91111 100644
70169--- a/kernel/acct.c
70170+++ b/kernel/acct.c
70171@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
70172 */
70173 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
70174 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
70175- file->f_op->write(file, (char *)&ac,
70176+ file->f_op->write(file, (char __force_user *)&ac,
70177 sizeof(acct_t), &file->f_pos);
70178 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
70179 set_fs(fs);
70180diff --git a/kernel/audit.c b/kernel/audit.c
70181index 5feed23..513b02c 100644
70182--- a/kernel/audit.c
70183+++ b/kernel/audit.c
70184@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
70185 3) suppressed due to audit_rate_limit
70186 4) suppressed due to audit_backlog_limit
70187 */
70188-static atomic_t audit_lost = ATOMIC_INIT(0);
70189+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
70190
70191 /* The netlink socket. */
70192 static struct sock *audit_sock;
70193@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
70194 unsigned long now;
70195 int print;
70196
70197- atomic_inc(&audit_lost);
70198+ atomic_inc_unchecked(&audit_lost);
70199
70200 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
70201
70202@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
70203 printk(KERN_WARNING
70204 "audit: audit_lost=%d audit_rate_limit=%d "
70205 "audit_backlog_limit=%d\n",
70206- atomic_read(&audit_lost),
70207+ atomic_read_unchecked(&audit_lost),
70208 audit_rate_limit,
70209 audit_backlog_limit);
70210 audit_panic(message);
70211@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70212 status_set.pid = audit_pid;
70213 status_set.rate_limit = audit_rate_limit;
70214 status_set.backlog_limit = audit_backlog_limit;
70215- status_set.lost = atomic_read(&audit_lost);
70216+ status_set.lost = atomic_read_unchecked(&audit_lost);
70217 status_set.backlog = skb_queue_len(&audit_skb_queue);
70218 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
70219 &status_set, sizeof(status_set));
70220@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70221 spin_unlock_irq(&tsk->sighand->siglock);
70222 }
70223 read_unlock(&tasklist_lock);
70224- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
70225- &s, sizeof(s));
70226+
70227+ if (!err)
70228+ audit_send_reply(NETLINK_CB(skb).pid, seq,
70229+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
70230 break;
70231 }
70232 case AUDIT_TTY_SET: {
70233diff --git a/kernel/auditsc.c b/kernel/auditsc.c
70234index 267e484..f8e295a 100644
70235--- a/kernel/auditsc.c
70236+++ b/kernel/auditsc.c
70237@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
70238 }
70239
70240 /* global counter which is incremented every time something logs in */
70241-static atomic_t session_id = ATOMIC_INIT(0);
70242+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
70243
70244 /**
70245 * audit_set_loginuid - set a task's audit_context loginuid
70246@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
70247 */
70248 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
70249 {
70250- unsigned int sessionid = atomic_inc_return(&session_id);
70251+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
70252 struct audit_context *context = task->audit_context;
70253
70254 if (context && context->in_syscall) {
70255diff --git a/kernel/capability.c b/kernel/capability.c
70256index 8a944f5..db5001e 100644
70257--- a/kernel/capability.c
70258+++ b/kernel/capability.c
70259@@ -305,10 +305,26 @@ int capable(int cap)
70260 BUG();
70261 }
70262
70263- if (security_capable(cap) == 0) {
70264+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
70265 current->flags |= PF_SUPERPRIV;
70266 return 1;
70267 }
70268 return 0;
70269 }
70270+
70271+int capable_nolog(int cap)
70272+{
70273+ if (unlikely(!cap_valid(cap))) {
70274+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
70275+ BUG();
70276+ }
70277+
70278+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
70279+ current->flags |= PF_SUPERPRIV;
70280+ return 1;
70281+ }
70282+ return 0;
70283+}
70284+
70285 EXPORT_SYMBOL(capable);
70286+EXPORT_SYMBOL(capable_nolog);
70287diff --git a/kernel/cgroup.c b/kernel/cgroup.c
70288index 1fbcc74..7000012 100644
70289--- a/kernel/cgroup.c
70290+++ b/kernel/cgroup.c
70291@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
70292 struct hlist_head *hhead;
70293 struct cg_cgroup_link *link;
70294
70295+ pax_track_stack();
70296+
70297 /* First see if we already have a cgroup group that matches
70298 * the desired set */
70299 read_lock(&css_set_lock);
70300diff --git a/kernel/compat.c b/kernel/compat.c
70301index 8bc5578..186e44a 100644
70302--- a/kernel/compat.c
70303+++ b/kernel/compat.c
70304@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
70305 mm_segment_t oldfs;
70306 long ret;
70307
70308- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
70309+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
70310 oldfs = get_fs();
70311 set_fs(KERNEL_DS);
70312 ret = hrtimer_nanosleep_restart(restart);
70313@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
70314 oldfs = get_fs();
70315 set_fs(KERNEL_DS);
70316 ret = hrtimer_nanosleep(&tu,
70317- rmtp ? (struct timespec __user *)&rmt : NULL,
70318+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
70319 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
70320 set_fs(oldfs);
70321
70322@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
70323 mm_segment_t old_fs = get_fs();
70324
70325 set_fs(KERNEL_DS);
70326- ret = sys_sigpending((old_sigset_t __user *) &s);
70327+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
70328 set_fs(old_fs);
70329 if (ret == 0)
70330 ret = put_user(s, set);
70331@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
70332 old_fs = get_fs();
70333 set_fs(KERNEL_DS);
70334 ret = sys_sigprocmask(how,
70335- set ? (old_sigset_t __user *) &s : NULL,
70336- oset ? (old_sigset_t __user *) &s : NULL);
70337+ set ? (old_sigset_t __force_user *) &s : NULL,
70338+ oset ? (old_sigset_t __force_user *) &s : NULL);
70339 set_fs(old_fs);
70340 if (ret == 0)
70341 if (oset)
70342@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
70343 mm_segment_t old_fs = get_fs();
70344
70345 set_fs(KERNEL_DS);
70346- ret = sys_old_getrlimit(resource, &r);
70347+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
70348 set_fs(old_fs);
70349
70350 if (!ret) {
70351@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
70352 mm_segment_t old_fs = get_fs();
70353
70354 set_fs(KERNEL_DS);
70355- ret = sys_getrusage(who, (struct rusage __user *) &r);
70356+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
70357 set_fs(old_fs);
70358
70359 if (ret)
70360@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
70361 set_fs (KERNEL_DS);
70362 ret = sys_wait4(pid,
70363 (stat_addr ?
70364- (unsigned int __user *) &status : NULL),
70365- options, (struct rusage __user *) &r);
70366+ (unsigned int __force_user *) &status : NULL),
70367+ options, (struct rusage __force_user *) &r);
70368 set_fs (old_fs);
70369
70370 if (ret > 0) {
70371@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
70372 memset(&info, 0, sizeof(info));
70373
70374 set_fs(KERNEL_DS);
70375- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
70376- uru ? (struct rusage __user *)&ru : NULL);
70377+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
70378+ uru ? (struct rusage __force_user *)&ru : NULL);
70379 set_fs(old_fs);
70380
70381 if ((ret < 0) || (info.si_signo == 0))
70382@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
70383 oldfs = get_fs();
70384 set_fs(KERNEL_DS);
70385 err = sys_timer_settime(timer_id, flags,
70386- (struct itimerspec __user *) &newts,
70387- (struct itimerspec __user *) &oldts);
70388+ (struct itimerspec __force_user *) &newts,
70389+ (struct itimerspec __force_user *) &oldts);
70390 set_fs(oldfs);
70391 if (!err && old && put_compat_itimerspec(old, &oldts))
70392 return -EFAULT;
70393@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
70394 oldfs = get_fs();
70395 set_fs(KERNEL_DS);
70396 err = sys_timer_gettime(timer_id,
70397- (struct itimerspec __user *) &ts);
70398+ (struct itimerspec __force_user *) &ts);
70399 set_fs(oldfs);
70400 if (!err && put_compat_itimerspec(setting, &ts))
70401 return -EFAULT;
70402@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
70403 oldfs = get_fs();
70404 set_fs(KERNEL_DS);
70405 err = sys_clock_settime(which_clock,
70406- (struct timespec __user *) &ts);
70407+ (struct timespec __force_user *) &ts);
70408 set_fs(oldfs);
70409 return err;
70410 }
70411@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
70412 oldfs = get_fs();
70413 set_fs(KERNEL_DS);
70414 err = sys_clock_gettime(which_clock,
70415- (struct timespec __user *) &ts);
70416+ (struct timespec __force_user *) &ts);
70417 set_fs(oldfs);
70418 if (!err && put_compat_timespec(&ts, tp))
70419 return -EFAULT;
70420@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
70421 oldfs = get_fs();
70422 set_fs(KERNEL_DS);
70423 err = sys_clock_getres(which_clock,
70424- (struct timespec __user *) &ts);
70425+ (struct timespec __force_user *) &ts);
70426 set_fs(oldfs);
70427 if (!err && tp && put_compat_timespec(&ts, tp))
70428 return -EFAULT;
70429@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
70430 long err;
70431 mm_segment_t oldfs;
70432 struct timespec tu;
70433- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
70434+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
70435
70436- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
70437+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
70438 oldfs = get_fs();
70439 set_fs(KERNEL_DS);
70440 err = clock_nanosleep_restart(restart);
70441@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
70442 oldfs = get_fs();
70443 set_fs(KERNEL_DS);
70444 err = sys_clock_nanosleep(which_clock, flags,
70445- (struct timespec __user *) &in,
70446- (struct timespec __user *) &out);
70447+ (struct timespec __force_user *) &in,
70448+ (struct timespec __force_user *) &out);
70449 set_fs(oldfs);
70450
70451 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
70452diff --git a/kernel/configs.c b/kernel/configs.c
70453index abaee68..047facd 100644
70454--- a/kernel/configs.c
70455+++ b/kernel/configs.c
70456@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
70457 struct proc_dir_entry *entry;
70458
70459 /* create the current config file */
70460+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
70461+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
70462+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
70463+ &ikconfig_file_ops);
70464+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70465+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
70466+ &ikconfig_file_ops);
70467+#endif
70468+#else
70469 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
70470 &ikconfig_file_ops);
70471+#endif
70472+
70473 if (!entry)
70474 return -ENOMEM;
70475
70476diff --git a/kernel/cpu.c b/kernel/cpu.c
70477index 7e8b6ac..8921388 100644
70478--- a/kernel/cpu.c
70479+++ b/kernel/cpu.c
70480@@ -19,7 +19,7 @@
70481 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
70482 static DEFINE_MUTEX(cpu_add_remove_lock);
70483
70484-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
70485+static RAW_NOTIFIER_HEAD(cpu_chain);
70486
70487 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
70488 * Should always be manipulated under cpu_add_remove_lock
70489diff --git a/kernel/cred.c b/kernel/cred.c
70490index 0b5b5fc..419b86a 100644
70491--- a/kernel/cred.c
70492+++ b/kernel/cred.c
70493@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
70494 */
70495 void __put_cred(struct cred *cred)
70496 {
70497+ pax_track_stack();
70498+
70499 kdebug("__put_cred(%p{%d,%d})", cred,
70500 atomic_read(&cred->usage),
70501 read_cred_subscribers(cred));
70502@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
70503 {
70504 struct cred *cred;
70505
70506+ pax_track_stack();
70507+
70508 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
70509 atomic_read(&tsk->cred->usage),
70510 read_cred_subscribers(tsk->cred));
70511@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct task_struct *task)
70512 {
70513 const struct cred *cred;
70514
70515+ pax_track_stack();
70516+
70517 rcu_read_lock();
70518
70519 do {
70520@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
70521 {
70522 struct cred *new;
70523
70524+ pax_track_stack();
70525+
70526 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
70527 if (!new)
70528 return NULL;
70529@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
70530 const struct cred *old;
70531 struct cred *new;
70532
70533+ pax_track_stack();
70534+
70535 validate_process_creds();
70536
70537 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
70538@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
70539 struct thread_group_cred *tgcred = NULL;
70540 struct cred *new;
70541
70542+ pax_track_stack();
70543+
70544 #ifdef CONFIG_KEYS
70545 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
70546 if (!tgcred)
70547@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
70548 struct cred *new;
70549 int ret;
70550
70551+ pax_track_stack();
70552+
70553 mutex_init(&p->cred_guard_mutex);
70554
70555 if (
70556@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
70557 struct task_struct *task = current;
70558 const struct cred *old = task->real_cred;
70559
70560+ pax_track_stack();
70561+
70562 kdebug("commit_creds(%p{%d,%d})", new,
70563 atomic_read(&new->usage),
70564 read_cred_subscribers(new));
70565@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
70566
70567 get_cred(new); /* we will require a ref for the subj creds too */
70568
70569+ gr_set_role_label(task, new->uid, new->gid);
70570+
70571 /* dumpability changes */
70572 if (old->euid != new->euid ||
70573 old->egid != new->egid ||
70574@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
70575 key_fsgid_changed(task);
70576
70577 /* do it
70578- * - What if a process setreuid()'s and this brings the
70579- * new uid over his NPROC rlimit? We can check this now
70580- * cheaply with the new uid cache, so if it matters
70581- * we should be checking for it. -DaveM
70582+ * RLIMIT_NPROC limits on user->processes have already been checked
70583+ * in set_user().
70584 */
70585 alter_cred_subscribers(new, 2);
70586 if (new->user != old->user)
70587@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
70588 */
70589 void abort_creds(struct cred *new)
70590 {
70591+ pax_track_stack();
70592+
70593 kdebug("abort_creds(%p{%d,%d})", new,
70594 atomic_read(&new->usage),
70595 read_cred_subscribers(new));
70596@@ -629,6 +647,8 @@ const struct cred *override_creds(const struct cred *new)
70597 {
70598 const struct cred *old = current->cred;
70599
70600+ pax_track_stack();
70601+
70602 kdebug("override_creds(%p{%d,%d})", new,
70603 atomic_read(&new->usage),
70604 read_cred_subscribers(new));
70605@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old)
70606 {
70607 const struct cred *override = current->cred;
70608
70609+ pax_track_stack();
70610+
70611 kdebug("revert_creds(%p{%d,%d})", old,
70612 atomic_read(&old->usage),
70613 read_cred_subscribers(old));
70614@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
70615 const struct cred *old;
70616 struct cred *new;
70617
70618+ pax_track_stack();
70619+
70620 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
70621 if (!new)
70622 return NULL;
70623@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
70624 */
70625 int set_security_override(struct cred *new, u32 secid)
70626 {
70627+ pax_track_stack();
70628+
70629 return security_kernel_act_as(new, secid);
70630 }
70631 EXPORT_SYMBOL(set_security_override);
70632@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
70633 u32 secid;
70634 int ret;
70635
70636+ pax_track_stack();
70637+
70638 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
70639 if (ret < 0)
70640 return ret;
70641diff --git a/kernel/exit.c b/kernel/exit.c
70642index 0f8fae3..9344a56 100644
70643--- a/kernel/exit.c
70644+++ b/kernel/exit.c
70645@@ -55,6 +55,10 @@
70646 #include <asm/pgtable.h>
70647 #include <asm/mmu_context.h>
70648
70649+#ifdef CONFIG_GRKERNSEC
70650+extern rwlock_t grsec_exec_file_lock;
70651+#endif
70652+
70653 static void exit_mm(struct task_struct * tsk);
70654
70655 static void __unhash_process(struct task_struct *p)
70656@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
70657 struct task_struct *leader;
70658 int zap_leader;
70659 repeat:
70660+#ifdef CONFIG_NET
70661+ gr_del_task_from_ip_table(p);
70662+#endif
70663+
70664 tracehook_prepare_release_task(p);
70665 /* don't need to get the RCU readlock here - the process is dead and
70666 * can't be modifying its own credentials */
70667@@ -397,7 +405,7 @@ int allow_signal(int sig)
70668 * know it'll be handled, so that they don't get converted to
70669 * SIGKILL or just silently dropped.
70670 */
70671- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
70672+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
70673 recalc_sigpending();
70674 spin_unlock_irq(&current->sighand->siglock);
70675 return 0;
70676@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
70677 vsnprintf(current->comm, sizeof(current->comm), name, args);
70678 va_end(args);
70679
70680+#ifdef CONFIG_GRKERNSEC
70681+ write_lock(&grsec_exec_file_lock);
70682+ if (current->exec_file) {
70683+ fput(current->exec_file);
70684+ current->exec_file = NULL;
70685+ }
70686+ write_unlock(&grsec_exec_file_lock);
70687+#endif
70688+
70689+ gr_set_kernel_label(current);
70690+
70691 /*
70692 * If we were started as result of loading a module, close all of the
70693 * user space pages. We don't need them, and if we didn't close them
70694@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
70695 struct task_struct *tsk = current;
70696 int group_dead;
70697
70698- profile_task_exit(tsk);
70699-
70700- WARN_ON(atomic_read(&tsk->fs_excl));
70701-
70702+ /*
70703+ * Check this first since set_fs() below depends on
70704+ * current_thread_info(), which we better not access when we're in
70705+ * interrupt context. Other than that, we want to do the set_fs()
70706+ * as early as possible.
70707+ */
70708 if (unlikely(in_interrupt()))
70709 panic("Aiee, killing interrupt handler!");
70710- if (unlikely(!tsk->pid))
70711- panic("Attempted to kill the idle task!");
70712
70713 /*
70714- * If do_exit is called because this processes oopsed, it's possible
70715+ * If do_exit is called because this processes Oops'ed, it's possible
70716 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
70717 * continuing. Amongst other possible reasons, this is to prevent
70718 * mm_release()->clear_child_tid() from writing to a user-controlled
70719@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
70720 */
70721 set_fs(USER_DS);
70722
70723+ profile_task_exit(tsk);
70724+
70725+ WARN_ON(atomic_read(&tsk->fs_excl));
70726+
70727+ if (unlikely(!tsk->pid))
70728+ panic("Attempted to kill the idle task!");
70729+
70730 tracehook_report_exit(&code);
70731
70732 validate_creds_for_do_exit(tsk);
70733@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
70734 tsk->exit_code = code;
70735 taskstats_exit(tsk, group_dead);
70736
70737+ gr_acl_handle_psacct(tsk, code);
70738+ gr_acl_handle_exit();
70739+
70740 exit_mm(tsk);
70741
70742 if (group_dead)
70743@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
70744
70745 if (unlikely(wo->wo_flags & WNOWAIT)) {
70746 int exit_code = p->exit_code;
70747- int why, status;
70748+ int why;
70749
70750 get_task_struct(p);
70751 read_unlock(&tasklist_lock);
70752diff --git a/kernel/fork.c b/kernel/fork.c
70753index 4bde56f..29a9bab 100644
70754--- a/kernel/fork.c
70755+++ b/kernel/fork.c
70756@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
70757 *stackend = STACK_END_MAGIC; /* for overflow detection */
70758
70759 #ifdef CONFIG_CC_STACKPROTECTOR
70760- tsk->stack_canary = get_random_int();
70761+ tsk->stack_canary = pax_get_random_long();
70762 #endif
70763
70764 /* One for us, one for whoever does the "release_task()" (usually parent) */
70765@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70766 mm->locked_vm = 0;
70767 mm->mmap = NULL;
70768 mm->mmap_cache = NULL;
70769- mm->free_area_cache = oldmm->mmap_base;
70770- mm->cached_hole_size = ~0UL;
70771+ mm->free_area_cache = oldmm->free_area_cache;
70772+ mm->cached_hole_size = oldmm->cached_hole_size;
70773 mm->map_count = 0;
70774 cpumask_clear(mm_cpumask(mm));
70775 mm->mm_rb = RB_ROOT;
70776@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70777 tmp->vm_flags &= ~VM_LOCKED;
70778 tmp->vm_mm = mm;
70779 tmp->vm_next = tmp->vm_prev = NULL;
70780+ tmp->vm_mirror = NULL;
70781 anon_vma_link(tmp);
70782 file = tmp->vm_file;
70783 if (file) {
70784@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70785 if (retval)
70786 goto out;
70787 }
70788+
70789+#ifdef CONFIG_PAX_SEGMEXEC
70790+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
70791+ struct vm_area_struct *mpnt_m;
70792+
70793+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
70794+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
70795+
70796+ if (!mpnt->vm_mirror)
70797+ continue;
70798+
70799+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
70800+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
70801+ mpnt->vm_mirror = mpnt_m;
70802+ } else {
70803+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
70804+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
70805+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
70806+ mpnt->vm_mirror->vm_mirror = mpnt;
70807+ }
70808+ }
70809+ BUG_ON(mpnt_m);
70810+ }
70811+#endif
70812+
70813 /* a new mm has just been created */
70814 arch_dup_mmap(oldmm, mm);
70815 retval = 0;
70816@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
70817 write_unlock(&fs->lock);
70818 return -EAGAIN;
70819 }
70820- fs->users++;
70821+ atomic_inc(&fs->users);
70822 write_unlock(&fs->lock);
70823 return 0;
70824 }
70825 tsk->fs = copy_fs_struct(fs);
70826 if (!tsk->fs)
70827 return -ENOMEM;
70828+ gr_set_chroot_entries(tsk, &tsk->fs->root);
70829 return 0;
70830 }
70831
70832@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70833 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
70834 #endif
70835 retval = -EAGAIN;
70836+
70837+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
70838+
70839 if (atomic_read(&p->real_cred->user->processes) >=
70840 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
70841- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
70842- p->real_cred->user != INIT_USER)
70843+ if (p->real_cred->user != INIT_USER &&
70844+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
70845 goto bad_fork_free;
70846 }
70847+ current->flags &= ~PF_NPROC_EXCEEDED;
70848
70849 retval = copy_creds(p, clone_flags);
70850 if (retval < 0)
70851@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70852 goto bad_fork_free_pid;
70853 }
70854
70855+ gr_copy_label(p);
70856+
70857 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
70858 /*
70859 * Clear TID on mm_release()?
70860@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
70861 bad_fork_free:
70862 free_task(p);
70863 fork_out:
70864+ gr_log_forkfail(retval);
70865+
70866 return ERR_PTR(retval);
70867 }
70868
70869@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
70870 if (clone_flags & CLONE_PARENT_SETTID)
70871 put_user(nr, parent_tidptr);
70872
70873+ gr_handle_brute_check();
70874+
70875 if (clone_flags & CLONE_VFORK) {
70876 p->vfork_done = &vfork;
70877 init_completion(&vfork);
70878@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
70879 return 0;
70880
70881 /* don't need lock here; in the worst case we'll do useless copy */
70882- if (fs->users == 1)
70883+ if (atomic_read(&fs->users) == 1)
70884 return 0;
70885
70886 *new_fsp = copy_fs_struct(fs);
70887@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
70888 fs = current->fs;
70889 write_lock(&fs->lock);
70890 current->fs = new_fs;
70891- if (--fs->users)
70892+ gr_set_chroot_entries(current, &current->fs->root);
70893+ if (atomic_dec_return(&fs->users))
70894 new_fs = NULL;
70895 else
70896 new_fs = fs;
70897diff --git a/kernel/futex.c b/kernel/futex.c
70898index fb98c9f..f158c0c 100644
70899--- a/kernel/futex.c
70900+++ b/kernel/futex.c
70901@@ -54,6 +54,7 @@
70902 #include <linux/mount.h>
70903 #include <linux/pagemap.h>
70904 #include <linux/syscalls.h>
70905+#include <linux/ptrace.h>
70906 #include <linux/signal.h>
70907 #include <linux/module.h>
70908 #include <linux/magic.h>
70909@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
70910 struct page *page;
70911 int err, ro = 0;
70912
70913+#ifdef CONFIG_PAX_SEGMEXEC
70914+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
70915+ return -EFAULT;
70916+#endif
70917+
70918 /*
70919 * The futex address must be "naturally" aligned.
70920 */
70921@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
70922 struct futex_q q;
70923 int ret;
70924
70925+ pax_track_stack();
70926+
70927 if (!bitset)
70928 return -EINVAL;
70929
70930@@ -1871,7 +1879,7 @@ retry:
70931
70932 restart = &current_thread_info()->restart_block;
70933 restart->fn = futex_wait_restart;
70934- restart->futex.uaddr = (u32 *)uaddr;
70935+ restart->futex.uaddr = uaddr;
70936 restart->futex.val = val;
70937 restart->futex.time = abs_time->tv64;
70938 restart->futex.bitset = bitset;
70939@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
70940 struct futex_q q;
70941 int res, ret;
70942
70943+ pax_track_stack();
70944+
70945 if (!bitset)
70946 return -EINVAL;
70947
70948@@ -2407,7 +2417,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
70949 {
70950 struct robust_list_head __user *head;
70951 unsigned long ret;
70952+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
70953 const struct cred *cred = current_cred(), *pcred;
70954+#endif
70955
70956 if (!futex_cmpxchg_enabled)
70957 return -ENOSYS;
70958@@ -2423,11 +2435,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
70959 if (!p)
70960 goto err_unlock;
70961 ret = -EPERM;
70962+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70963+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
70964+ goto err_unlock;
70965+#else
70966 pcred = __task_cred(p);
70967 if (cred->euid != pcred->euid &&
70968 cred->euid != pcred->uid &&
70969 !capable(CAP_SYS_PTRACE))
70970 goto err_unlock;
70971+#endif
70972 head = p->robust_list;
70973 rcu_read_unlock();
70974 }
70975@@ -2489,7 +2506,7 @@ retry:
70976 */
70977 static inline int fetch_robust_entry(struct robust_list __user **entry,
70978 struct robust_list __user * __user *head,
70979- int *pi)
70980+ unsigned int *pi)
70981 {
70982 unsigned long uentry;
70983
70984@@ -2670,6 +2687,7 @@ static int __init futex_init(void)
70985 {
70986 u32 curval;
70987 int i;
70988+ mm_segment_t oldfs;
70989
70990 /*
70991 * This will fail and we want it. Some arch implementations do
70992@@ -2681,7 +2699,10 @@ static int __init futex_init(void)
70993 * implementation, the non functional ones will return
70994 * -ENOSYS.
70995 */
70996+ oldfs = get_fs();
70997+ set_fs(USER_DS);
70998 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
70999+ set_fs(oldfs);
71000 if (curval == -EFAULT)
71001 futex_cmpxchg_enabled = 1;
71002
71003diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
71004index 2357165..8d70cee 100644
71005--- a/kernel/futex_compat.c
71006+++ b/kernel/futex_compat.c
71007@@ -10,6 +10,7 @@
71008 #include <linux/compat.h>
71009 #include <linux/nsproxy.h>
71010 #include <linux/futex.h>
71011+#include <linux/ptrace.h>
71012
71013 #include <asm/uaccess.h>
71014
71015@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71016 {
71017 struct compat_robust_list_head __user *head;
71018 unsigned long ret;
71019- const struct cred *cred = current_cred(), *pcred;
71020+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
71021+ const struct cred *cred = current_cred();
71022+ const struct cred *pcred;
71023+#endif
71024
71025 if (!futex_cmpxchg_enabled)
71026 return -ENOSYS;
71027@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71028 if (!p)
71029 goto err_unlock;
71030 ret = -EPERM;
71031+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71032+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
71033+ goto err_unlock;
71034+#else
71035 pcred = __task_cred(p);
71036 if (cred->euid != pcred->euid &&
71037 cred->euid != pcred->uid &&
71038 !capable(CAP_SYS_PTRACE))
71039 goto err_unlock;
71040+#endif
71041 head = p->compat_robust_list;
71042 read_unlock(&tasklist_lock);
71043 }
71044diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
71045index 9b22d03..6295b62 100644
71046--- a/kernel/gcov/base.c
71047+++ b/kernel/gcov/base.c
71048@@ -102,11 +102,6 @@ void gcov_enable_events(void)
71049 }
71050
71051 #ifdef CONFIG_MODULES
71052-static inline int within(void *addr, void *start, unsigned long size)
71053-{
71054- return ((addr >= start) && (addr < start + size));
71055-}
71056-
71057 /* Update list and generate events when modules are unloaded. */
71058 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71059 void *data)
71060@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71061 prev = NULL;
71062 /* Remove entries located in module from linked list. */
71063 for (info = gcov_info_head; info; info = info->next) {
71064- if (within(info, mod->module_core, mod->core_size)) {
71065+ if (within_module_core_rw((unsigned long)info, mod)) {
71066 if (prev)
71067 prev->next = info->next;
71068 else
71069diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
71070index a6e9d00..a0da4f9 100644
71071--- a/kernel/hrtimer.c
71072+++ b/kernel/hrtimer.c
71073@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
71074 local_irq_restore(flags);
71075 }
71076
71077-static void run_hrtimer_softirq(struct softirq_action *h)
71078+static void run_hrtimer_softirq(void)
71079 {
71080 hrtimer_peek_ahead_timers();
71081 }
71082diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
71083index 8b6b8b6..6bc87df 100644
71084--- a/kernel/kallsyms.c
71085+++ b/kernel/kallsyms.c
71086@@ -11,6 +11,9 @@
71087 * Changed the compression method from stem compression to "table lookup"
71088 * compression (see scripts/kallsyms.c for a more complete description)
71089 */
71090+#ifdef CONFIG_GRKERNSEC_HIDESYM
71091+#define __INCLUDED_BY_HIDESYM 1
71092+#endif
71093 #include <linux/kallsyms.h>
71094 #include <linux/module.h>
71095 #include <linux/init.h>
71096@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
71097
71098 static inline int is_kernel_inittext(unsigned long addr)
71099 {
71100+ if (system_state != SYSTEM_BOOTING)
71101+ return 0;
71102+
71103 if (addr >= (unsigned long)_sinittext
71104 && addr <= (unsigned long)_einittext)
71105 return 1;
71106 return 0;
71107 }
71108
71109+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71110+#ifdef CONFIG_MODULES
71111+static inline int is_module_text(unsigned long addr)
71112+{
71113+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
71114+ return 1;
71115+
71116+ addr = ktla_ktva(addr);
71117+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
71118+}
71119+#else
71120+static inline int is_module_text(unsigned long addr)
71121+{
71122+ return 0;
71123+}
71124+#endif
71125+#endif
71126+
71127 static inline int is_kernel_text(unsigned long addr)
71128 {
71129 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
71130@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
71131
71132 static inline int is_kernel(unsigned long addr)
71133 {
71134+
71135+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71136+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
71137+ return 1;
71138+
71139+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
71140+#else
71141 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
71142+#endif
71143+
71144 return 1;
71145 return in_gate_area_no_task(addr);
71146 }
71147
71148 static int is_ksym_addr(unsigned long addr)
71149 {
71150+
71151+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71152+ if (is_module_text(addr))
71153+ return 0;
71154+#endif
71155+
71156 if (all_var)
71157 return is_kernel(addr);
71158
71159@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
71160
71161 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
71162 {
71163- iter->name[0] = '\0';
71164 iter->nameoff = get_symbol_offset(new_pos);
71165 iter->pos = new_pos;
71166 }
71167@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
71168 {
71169 struct kallsym_iter *iter = m->private;
71170
71171+#ifdef CONFIG_GRKERNSEC_HIDESYM
71172+ if (current_uid())
71173+ return 0;
71174+#endif
71175+
71176 /* Some debugging symbols have no name. Ignore them. */
71177 if (!iter->name[0])
71178 return 0;
71179@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
71180 struct kallsym_iter *iter;
71181 int ret;
71182
71183- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
71184+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
71185 if (!iter)
71186 return -ENOMEM;
71187 reset_iter(iter, 0);
71188diff --git a/kernel/kexec.c b/kernel/kexec.c
71189index f336e21..9c1c20b 100644
71190--- a/kernel/kexec.c
71191+++ b/kernel/kexec.c
71192@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
71193 unsigned long flags)
71194 {
71195 struct compat_kexec_segment in;
71196- struct kexec_segment out, __user *ksegments;
71197+ struct kexec_segment out;
71198+ struct kexec_segment __user *ksegments;
71199 unsigned long i, result;
71200
71201 /* Don't allow clients that don't understand the native
71202diff --git a/kernel/kgdb.c b/kernel/kgdb.c
71203index 53dae4b..9ba3743 100644
71204--- a/kernel/kgdb.c
71205+++ b/kernel/kgdb.c
71206@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
71207 /* Guard for recursive entry */
71208 static int exception_level;
71209
71210-static struct kgdb_io *kgdb_io_ops;
71211+static const struct kgdb_io *kgdb_io_ops;
71212 static DEFINE_SPINLOCK(kgdb_registration_lock);
71213
71214 /* kgdb console driver is loaded */
71215@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
71216 */
71217 static atomic_t passive_cpu_wait[NR_CPUS];
71218 static atomic_t cpu_in_kgdb[NR_CPUS];
71219-atomic_t kgdb_setting_breakpoint;
71220+atomic_unchecked_t kgdb_setting_breakpoint;
71221
71222 struct task_struct *kgdb_usethread;
71223 struct task_struct *kgdb_contthread;
71224@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
71225 sizeof(unsigned long)];
71226
71227 /* to keep track of the CPU which is doing the single stepping*/
71228-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71229+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71230
71231 /*
71232 * If you are debugging a problem where roundup (the collection of
71233@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
71234 return 0;
71235 if (kgdb_connected)
71236 return 1;
71237- if (atomic_read(&kgdb_setting_breakpoint))
71238+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
71239 return 1;
71240 if (print_wait)
71241 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
71242@@ -1426,8 +1426,8 @@ acquirelock:
71243 * instance of the exception handler wanted to come into the
71244 * debugger on a different CPU via a single step
71245 */
71246- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
71247- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
71248+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
71249+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
71250
71251 atomic_set(&kgdb_active, -1);
71252 touch_softlockup_watchdog();
71253@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
71254 *
71255 * Register it with the KGDB core.
71256 */
71257-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
71258+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
71259 {
71260 int err;
71261
71262@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
71263 *
71264 * Unregister it with the KGDB core.
71265 */
71266-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
71267+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
71268 {
71269 BUG_ON(kgdb_connected);
71270
71271@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
71272 */
71273 void kgdb_breakpoint(void)
71274 {
71275- atomic_set(&kgdb_setting_breakpoint, 1);
71276+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
71277 wmb(); /* Sync point before breakpoint */
71278 arch_kgdb_breakpoint();
71279 wmb(); /* Sync point after breakpoint */
71280- atomic_set(&kgdb_setting_breakpoint, 0);
71281+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
71282 }
71283 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
71284
71285diff --git a/kernel/kmod.c b/kernel/kmod.c
71286index d206078..e27ba6a 100644
71287--- a/kernel/kmod.c
71288+++ b/kernel/kmod.c
71289@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
71290 * If module auto-loading support is disabled then this function
71291 * becomes a no-operation.
71292 */
71293-int __request_module(bool wait, const char *fmt, ...)
71294+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
71295 {
71296- va_list args;
71297 char module_name[MODULE_NAME_LEN];
71298 unsigned int max_modprobes;
71299 int ret;
71300- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
71301+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
71302 static char *envp[] = { "HOME=/",
71303 "TERM=linux",
71304 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
71305@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
71306 if (ret)
71307 return ret;
71308
71309- va_start(args, fmt);
71310- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
71311- va_end(args);
71312+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
71313 if (ret >= MODULE_NAME_LEN)
71314 return -ENAMETOOLONG;
71315
71316+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71317+ if (!current_uid()) {
71318+ /* hack to workaround consolekit/udisks stupidity */
71319+ read_lock(&tasklist_lock);
71320+ if (!strcmp(current->comm, "mount") &&
71321+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
71322+ read_unlock(&tasklist_lock);
71323+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
71324+ return -EPERM;
71325+ }
71326+ read_unlock(&tasklist_lock);
71327+ }
71328+#endif
71329+
71330 /* If modprobe needs a service that is in a module, we get a recursive
71331 * loop. Limit the number of running kmod threads to max_threads/2 or
71332 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
71333@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
71334 atomic_dec(&kmod_concurrent);
71335 return ret;
71336 }
71337+
71338+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
71339+{
71340+ va_list args;
71341+ int ret;
71342+
71343+ va_start(args, fmt);
71344+ ret = ____request_module(wait, module_param, fmt, args);
71345+ va_end(args);
71346+
71347+ return ret;
71348+}
71349+
71350+int __request_module(bool wait, const char *fmt, ...)
71351+{
71352+ va_list args;
71353+ int ret;
71354+
71355+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71356+ if (current_uid()) {
71357+ char module_param[MODULE_NAME_LEN];
71358+
71359+ memset(module_param, 0, sizeof(module_param));
71360+
71361+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
71362+
71363+ va_start(args, fmt);
71364+ ret = ____request_module(wait, module_param, fmt, args);
71365+ va_end(args);
71366+
71367+ return ret;
71368+ }
71369+#endif
71370+
71371+ va_start(args, fmt);
71372+ ret = ____request_module(wait, NULL, fmt, args);
71373+ va_end(args);
71374+
71375+ return ret;
71376+}
71377+
71378+
71379 EXPORT_SYMBOL(__request_module);
71380 #endif /* CONFIG_MODULES */
71381
71382@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
71383 *
71384 * Thus the __user pointer cast is valid here.
71385 */
71386- sys_wait4(pid, (int __user *)&ret, 0, NULL);
71387+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
71388
71389 /*
71390 * If ret is 0, either ____call_usermodehelper failed and the
71391diff --git a/kernel/kprobes.c b/kernel/kprobes.c
71392index 5240d75..5a6fb33 100644
71393--- a/kernel/kprobes.c
71394+++ b/kernel/kprobes.c
71395@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
71396 * kernel image and loaded module images reside. This is required
71397 * so x86_64 can correctly handle the %rip-relative fixups.
71398 */
71399- kip->insns = module_alloc(PAGE_SIZE);
71400+ kip->insns = module_alloc_exec(PAGE_SIZE);
71401 if (!kip->insns) {
71402 kfree(kip);
71403 return NULL;
71404@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
71405 */
71406 if (!list_is_singular(&kprobe_insn_pages)) {
71407 list_del(&kip->list);
71408- module_free(NULL, kip->insns);
71409+ module_free_exec(NULL, kip->insns);
71410 kfree(kip);
71411 }
71412 return 1;
71413@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
71414 {
71415 int i, err = 0;
71416 unsigned long offset = 0, size = 0;
71417- char *modname, namebuf[128];
71418+ char *modname, namebuf[KSYM_NAME_LEN];
71419 const char *symbol_name;
71420 void *addr;
71421 struct kprobe_blackpoint *kb;
71422@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
71423 const char *sym = NULL;
71424 unsigned int i = *(loff_t *) v;
71425 unsigned long offset = 0;
71426- char *modname, namebuf[128];
71427+ char *modname, namebuf[KSYM_NAME_LEN];
71428
71429 head = &kprobe_table[i];
71430 preempt_disable();
71431diff --git a/kernel/lockdep.c b/kernel/lockdep.c
71432index d86fe89..d12fc66 100644
71433--- a/kernel/lockdep.c
71434+++ b/kernel/lockdep.c
71435@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
71436 /*
71437 * Various lockdep statistics:
71438 */
71439-atomic_t chain_lookup_hits;
71440-atomic_t chain_lookup_misses;
71441-atomic_t hardirqs_on_events;
71442-atomic_t hardirqs_off_events;
71443-atomic_t redundant_hardirqs_on;
71444-atomic_t redundant_hardirqs_off;
71445-atomic_t softirqs_on_events;
71446-atomic_t softirqs_off_events;
71447-atomic_t redundant_softirqs_on;
71448-atomic_t redundant_softirqs_off;
71449-atomic_t nr_unused_locks;
71450-atomic_t nr_cyclic_checks;
71451-atomic_t nr_find_usage_forwards_checks;
71452-atomic_t nr_find_usage_backwards_checks;
71453+atomic_unchecked_t chain_lookup_hits;
71454+atomic_unchecked_t chain_lookup_misses;
71455+atomic_unchecked_t hardirqs_on_events;
71456+atomic_unchecked_t hardirqs_off_events;
71457+atomic_unchecked_t redundant_hardirqs_on;
71458+atomic_unchecked_t redundant_hardirqs_off;
71459+atomic_unchecked_t softirqs_on_events;
71460+atomic_unchecked_t softirqs_off_events;
71461+atomic_unchecked_t redundant_softirqs_on;
71462+atomic_unchecked_t redundant_softirqs_off;
71463+atomic_unchecked_t nr_unused_locks;
71464+atomic_unchecked_t nr_cyclic_checks;
71465+atomic_unchecked_t nr_find_usage_forwards_checks;
71466+atomic_unchecked_t nr_find_usage_backwards_checks;
71467 #endif
71468
71469 /*
71470@@ -577,6 +577,10 @@ static int static_obj(void *obj)
71471 int i;
71472 #endif
71473
71474+#ifdef CONFIG_PAX_KERNEXEC
71475+ start = ktla_ktva(start);
71476+#endif
71477+
71478 /*
71479 * static variable?
71480 */
71481@@ -592,8 +596,7 @@ static int static_obj(void *obj)
71482 */
71483 for_each_possible_cpu(i) {
71484 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
71485- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
71486- + per_cpu_offset(i);
71487+ end = start + PERCPU_ENOUGH_ROOM;
71488
71489 if ((addr >= start) && (addr < end))
71490 return 1;
71491@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
71492 if (!static_obj(lock->key)) {
71493 debug_locks_off();
71494 printk("INFO: trying to register non-static key.\n");
71495+ printk("lock:%pS key:%pS.\n", lock, lock->key);
71496 printk("the code is fine but needs lockdep annotation.\n");
71497 printk("turning off the locking correctness validator.\n");
71498 dump_stack();
71499@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
71500 if (!class)
71501 return 0;
71502 }
71503- debug_atomic_inc((atomic_t *)&class->ops);
71504+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
71505 if (very_verbose(class)) {
71506 printk("\nacquire class [%p] %s", class->key, class->name);
71507 if (class->name_version > 1)
71508diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
71509index a2ee95a..092f0f2 100644
71510--- a/kernel/lockdep_internals.h
71511+++ b/kernel/lockdep_internals.h
71512@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
71513 /*
71514 * Various lockdep statistics:
71515 */
71516-extern atomic_t chain_lookup_hits;
71517-extern atomic_t chain_lookup_misses;
71518-extern atomic_t hardirqs_on_events;
71519-extern atomic_t hardirqs_off_events;
71520-extern atomic_t redundant_hardirqs_on;
71521-extern atomic_t redundant_hardirqs_off;
71522-extern atomic_t softirqs_on_events;
71523-extern atomic_t softirqs_off_events;
71524-extern atomic_t redundant_softirqs_on;
71525-extern atomic_t redundant_softirqs_off;
71526-extern atomic_t nr_unused_locks;
71527-extern atomic_t nr_cyclic_checks;
71528-extern atomic_t nr_cyclic_check_recursions;
71529-extern atomic_t nr_find_usage_forwards_checks;
71530-extern atomic_t nr_find_usage_forwards_recursions;
71531-extern atomic_t nr_find_usage_backwards_checks;
71532-extern atomic_t nr_find_usage_backwards_recursions;
71533-# define debug_atomic_inc(ptr) atomic_inc(ptr)
71534-# define debug_atomic_dec(ptr) atomic_dec(ptr)
71535-# define debug_atomic_read(ptr) atomic_read(ptr)
71536+extern atomic_unchecked_t chain_lookup_hits;
71537+extern atomic_unchecked_t chain_lookup_misses;
71538+extern atomic_unchecked_t hardirqs_on_events;
71539+extern atomic_unchecked_t hardirqs_off_events;
71540+extern atomic_unchecked_t redundant_hardirqs_on;
71541+extern atomic_unchecked_t redundant_hardirqs_off;
71542+extern atomic_unchecked_t softirqs_on_events;
71543+extern atomic_unchecked_t softirqs_off_events;
71544+extern atomic_unchecked_t redundant_softirqs_on;
71545+extern atomic_unchecked_t redundant_softirqs_off;
71546+extern atomic_unchecked_t nr_unused_locks;
71547+extern atomic_unchecked_t nr_cyclic_checks;
71548+extern atomic_unchecked_t nr_cyclic_check_recursions;
71549+extern atomic_unchecked_t nr_find_usage_forwards_checks;
71550+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
71551+extern atomic_unchecked_t nr_find_usage_backwards_checks;
71552+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
71553+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
71554+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
71555+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
71556 #else
71557 # define debug_atomic_inc(ptr) do { } while (0)
71558 # define debug_atomic_dec(ptr) do { } while (0)
71559diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
71560index d4aba4f..02a353f 100644
71561--- a/kernel/lockdep_proc.c
71562+++ b/kernel/lockdep_proc.c
71563@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
71564
71565 static void print_name(struct seq_file *m, struct lock_class *class)
71566 {
71567- char str[128];
71568+ char str[KSYM_NAME_LEN];
71569 const char *name = class->name;
71570
71571 if (!name) {
71572diff --git a/kernel/module.c b/kernel/module.c
71573index 4b270e6..2226274 100644
71574--- a/kernel/module.c
71575+++ b/kernel/module.c
71576@@ -55,6 +55,7 @@
71577 #include <linux/async.h>
71578 #include <linux/percpu.h>
71579 #include <linux/kmemleak.h>
71580+#include <linux/grsecurity.h>
71581
71582 #define CREATE_TRACE_POINTS
71583 #include <trace/events/module.h>
71584@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
71585 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
71586
71587 /* Bounds of module allocation, for speeding __module_address */
71588-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
71589+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
71590+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
71591
71592 int register_module_notifier(struct notifier_block * nb)
71593 {
71594@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
71595 return true;
71596
71597 list_for_each_entry_rcu(mod, &modules, list) {
71598- struct symsearch arr[] = {
71599+ struct symsearch modarr[] = {
71600 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
71601 NOT_GPL_ONLY, false },
71602 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
71603@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
71604 #endif
71605 };
71606
71607- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
71608+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
71609 return true;
71610 }
71611 return false;
71612@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
71613 void *ptr;
71614 int cpu;
71615
71616- if (align > PAGE_SIZE) {
71617+ if (align-1 >= PAGE_SIZE) {
71618 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
71619 name, align, PAGE_SIZE);
71620 align = PAGE_SIZE;
71621@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
71622 * /sys/module/foo/sections stuff
71623 * J. Corbet <corbet@lwn.net>
71624 */
71625-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
71626+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71627
71628 static inline bool sect_empty(const Elf_Shdr *sect)
71629 {
71630@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
71631 destroy_params(mod->kp, mod->num_kp);
71632
71633 /* This may be NULL, but that's OK */
71634- module_free(mod, mod->module_init);
71635+ module_free(mod, mod->module_init_rw);
71636+ module_free_exec(mod, mod->module_init_rx);
71637 kfree(mod->args);
71638 if (mod->percpu)
71639 percpu_modfree(mod->percpu);
71640@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
71641 percpu_modfree(mod->refptr);
71642 #endif
71643 /* Free lock-classes: */
71644- lockdep_free_key_range(mod->module_core, mod->core_size);
71645+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
71646+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
71647
71648 /* Finally, free the core (containing the module structure) */
71649- module_free(mod, mod->module_core);
71650+ module_free_exec(mod, mod->module_core_rx);
71651+ module_free(mod, mod->module_core_rw);
71652
71653 #ifdef CONFIG_MPU
71654 update_protections(current->mm);
71655@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71656 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
71657 int ret = 0;
71658 const struct kernel_symbol *ksym;
71659+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71660+ int is_fs_load = 0;
71661+ int register_filesystem_found = 0;
71662+ char *p;
71663+
71664+ p = strstr(mod->args, "grsec_modharden_fs");
71665+
71666+ if (p) {
71667+ char *endptr = p + strlen("grsec_modharden_fs");
71668+ /* copy \0 as well */
71669+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
71670+ is_fs_load = 1;
71671+ }
71672+#endif
71673+
71674
71675 for (i = 1; i < n; i++) {
71676+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71677+ const char *name = strtab + sym[i].st_name;
71678+
71679+ /* it's a real shame this will never get ripped and copied
71680+ upstream! ;(
71681+ */
71682+ if (is_fs_load && !strcmp(name, "register_filesystem"))
71683+ register_filesystem_found = 1;
71684+#endif
71685 switch (sym[i].st_shndx) {
71686 case SHN_COMMON:
71687 /* We compiled with -fno-common. These are not
71688@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71689 strtab + sym[i].st_name, mod);
71690 /* Ok if resolved. */
71691 if (ksym) {
71692+ pax_open_kernel();
71693 sym[i].st_value = ksym->value;
71694+ pax_close_kernel();
71695 break;
71696 }
71697
71698@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71699 secbase = (unsigned long)mod->percpu;
71700 else
71701 secbase = sechdrs[sym[i].st_shndx].sh_addr;
71702+ pax_open_kernel();
71703 sym[i].st_value += secbase;
71704+ pax_close_kernel();
71705 break;
71706 }
71707 }
71708
71709+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71710+ if (is_fs_load && !register_filesystem_found) {
71711+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
71712+ ret = -EPERM;
71713+ }
71714+#endif
71715+
71716 return ret;
71717 }
71718
71719@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
71720 || s->sh_entsize != ~0UL
71721 || strstarts(secstrings + s->sh_name, ".init"))
71722 continue;
71723- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
71724+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71725+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
71726+ else
71727+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
71728 DEBUGP("\t%s\n", secstrings + s->sh_name);
71729 }
71730- if (m == 0)
71731- mod->core_text_size = mod->core_size;
71732 }
71733
71734 DEBUGP("Init section allocation order:\n");
71735@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
71736 || s->sh_entsize != ~0UL
71737 || !strstarts(secstrings + s->sh_name, ".init"))
71738 continue;
71739- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
71740- | INIT_OFFSET_MASK);
71741+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71742+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
71743+ else
71744+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
71745+ s->sh_entsize |= INIT_OFFSET_MASK;
71746 DEBUGP("\t%s\n", secstrings + s->sh_name);
71747 }
71748- if (m == 0)
71749- mod->init_text_size = mod->init_size;
71750 }
71751 }
71752
71753@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
71754
71755 /* As per nm */
71756 static char elf_type(const Elf_Sym *sym,
71757- Elf_Shdr *sechdrs,
71758- const char *secstrings,
71759- struct module *mod)
71760+ const Elf_Shdr *sechdrs,
71761+ const char *secstrings)
71762 {
71763 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
71764 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
71765@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
71766
71767 /* Put symbol section at end of init part of module. */
71768 symsect->sh_flags |= SHF_ALLOC;
71769- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
71770+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
71771 symindex) | INIT_OFFSET_MASK;
71772 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
71773
71774@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
71775 }
71776
71777 /* Append room for core symbols at end of core part. */
71778- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
71779- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
71780+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
71781+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
71782
71783 /* Put string table section at end of init part of module. */
71784 strsect->sh_flags |= SHF_ALLOC;
71785- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
71786+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
71787 strindex) | INIT_OFFSET_MASK;
71788 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
71789
71790 /* Append room for core symbols' strings at end of core part. */
71791- *pstroffs = mod->core_size;
71792+ *pstroffs = mod->core_size_rx;
71793 __set_bit(0, strmap);
71794- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
71795+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
71796
71797 return symoffs;
71798 }
71799@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
71800 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
71801 mod->strtab = (void *)sechdrs[strindex].sh_addr;
71802
71803+ pax_open_kernel();
71804+
71805 /* Set types up while we still have access to sections. */
71806 for (i = 0; i < mod->num_symtab; i++)
71807 mod->symtab[i].st_info
71808- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
71809+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
71810
71811- mod->core_symtab = dst = mod->module_core + symoffs;
71812+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
71813 src = mod->symtab;
71814 *dst = *src;
71815 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
71816@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
71817 }
71818 mod->core_num_syms = ndst;
71819
71820- mod->core_strtab = s = mod->module_core + stroffs;
71821+ mod->core_strtab = s = mod->module_core_rx + stroffs;
71822 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
71823 if (test_bit(i, strmap))
71824 *++s = mod->strtab[i];
71825+
71826+ pax_close_kernel();
71827 }
71828 #else
71829 static inline unsigned long layout_symtab(struct module *mod,
71830@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
71831 #endif
71832 }
71833
71834-static void *module_alloc_update_bounds(unsigned long size)
71835+static void *module_alloc_update_bounds_rw(unsigned long size)
71836 {
71837 void *ret = module_alloc(size);
71838
71839 if (ret) {
71840 /* Update module bounds. */
71841- if ((unsigned long)ret < module_addr_min)
71842- module_addr_min = (unsigned long)ret;
71843- if ((unsigned long)ret + size > module_addr_max)
71844- module_addr_max = (unsigned long)ret + size;
71845+ if ((unsigned long)ret < module_addr_min_rw)
71846+ module_addr_min_rw = (unsigned long)ret;
71847+ if ((unsigned long)ret + size > module_addr_max_rw)
71848+ module_addr_max_rw = (unsigned long)ret + size;
71849+ }
71850+ return ret;
71851+}
71852+
71853+static void *module_alloc_update_bounds_rx(unsigned long size)
71854+{
71855+ void *ret = module_alloc_exec(size);
71856+
71857+ if (ret) {
71858+ /* Update module bounds. */
71859+ if ((unsigned long)ret < module_addr_min_rx)
71860+ module_addr_min_rx = (unsigned long)ret;
71861+ if ((unsigned long)ret + size > module_addr_max_rx)
71862+ module_addr_max_rx = (unsigned long)ret + size;
71863 }
71864 return ret;
71865 }
71866@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
71867 unsigned int i;
71868
71869 /* only scan the sections containing data */
71870- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
71871- (unsigned long)mod->module_core,
71872+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
71873+ (unsigned long)mod->module_core_rw,
71874 sizeof(struct module), GFP_KERNEL);
71875
71876 for (i = 1; i < hdr->e_shnum; i++) {
71877@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
71878 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
71879 continue;
71880
71881- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
71882- (unsigned long)mod->module_core,
71883+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
71884+ (unsigned long)mod->module_core_rw,
71885 sechdrs[i].sh_size, GFP_KERNEL);
71886 }
71887 }
71888@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
71889 Elf_Ehdr *hdr;
71890 Elf_Shdr *sechdrs;
71891 char *secstrings, *args, *modmagic, *strtab = NULL;
71892- char *staging;
71893+ char *staging, *license;
71894 unsigned int i;
71895 unsigned int symindex = 0;
71896 unsigned int strindex = 0;
71897@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
71898 goto free_hdr;
71899 }
71900
71901+ license = get_modinfo(sechdrs, infoindex, "license");
71902+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
71903+ if (!license || !license_is_gpl_compatible(license)) {
71904+ err = -ENOEXEC;
71905+ goto free_hdr;
71906+ }
71907+#endif
71908+
71909 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
71910 /* This is allowed: modprobe --force will invalidate it. */
71911 if (!modmagic) {
71912@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
71913 secstrings, &stroffs, strmap);
71914
71915 /* Do the allocs. */
71916- ptr = module_alloc_update_bounds(mod->core_size);
71917+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
71918 /*
71919 * The pointer to this block is stored in the module structure
71920 * which is inside the block. Just mark it as not being a
71921@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
71922 err = -ENOMEM;
71923 goto free_percpu;
71924 }
71925- memset(ptr, 0, mod->core_size);
71926- mod->module_core = ptr;
71927+ memset(ptr, 0, mod->core_size_rw);
71928+ mod->module_core_rw = ptr;
71929
71930- ptr = module_alloc_update_bounds(mod->init_size);
71931+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
71932 /*
71933 * The pointer to this block is stored in the module structure
71934 * which is inside the block. This block doesn't need to be
71935 * scanned as it contains data and code that will be freed
71936 * after the module is initialized.
71937 */
71938- kmemleak_ignore(ptr);
71939- if (!ptr && mod->init_size) {
71940+ kmemleak_not_leak(ptr);
71941+ if (!ptr && mod->init_size_rw) {
71942+ err = -ENOMEM;
71943+ goto free_core_rw;
71944+ }
71945+ memset(ptr, 0, mod->init_size_rw);
71946+ mod->module_init_rw = ptr;
71947+
71948+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
71949+ kmemleak_not_leak(ptr);
71950+ if (!ptr) {
71951+ err = -ENOMEM;
71952+ goto free_init_rw;
71953+ }
71954+
71955+ pax_open_kernel();
71956+ memset(ptr, 0, mod->core_size_rx);
71957+ pax_close_kernel();
71958+ mod->module_core_rx = ptr;
71959+
71960+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
71961+ kmemleak_not_leak(ptr);
71962+ if (!ptr && mod->init_size_rx) {
71963 err = -ENOMEM;
71964- goto free_core;
71965+ goto free_core_rx;
71966 }
71967- memset(ptr, 0, mod->init_size);
71968- mod->module_init = ptr;
71969+
71970+ pax_open_kernel();
71971+ memset(ptr, 0, mod->init_size_rx);
71972+ pax_close_kernel();
71973+ mod->module_init_rx = ptr;
71974
71975 /* Transfer each section which specifies SHF_ALLOC */
71976 DEBUGP("final section addresses:\n");
71977@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
71978 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
71979 continue;
71980
71981- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
71982- dest = mod->module_init
71983- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
71984- else
71985- dest = mod->module_core + sechdrs[i].sh_entsize;
71986+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
71987+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
71988+ dest = mod->module_init_rw
71989+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
71990+ else
71991+ dest = mod->module_init_rx
71992+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
71993+ } else {
71994+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
71995+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
71996+ else
71997+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
71998+ }
71999
72000- if (sechdrs[i].sh_type != SHT_NOBITS)
72001- memcpy(dest, (void *)sechdrs[i].sh_addr,
72002- sechdrs[i].sh_size);
72003+ if (sechdrs[i].sh_type != SHT_NOBITS) {
72004+
72005+#ifdef CONFIG_PAX_KERNEXEC
72006+#ifdef CONFIG_X86_64
72007+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
72008+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
72009+#endif
72010+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
72011+ pax_open_kernel();
72012+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72013+ pax_close_kernel();
72014+ } else
72015+#endif
72016+
72017+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72018+ }
72019 /* Update sh_addr to point to copy in image. */
72020- sechdrs[i].sh_addr = (unsigned long)dest;
72021+
72022+#ifdef CONFIG_PAX_KERNEXEC
72023+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
72024+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
72025+ else
72026+#endif
72027+
72028+ sechdrs[i].sh_addr = (unsigned long)dest;
72029 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
72030 }
72031 /* Module has been moved. */
72032@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
72033 mod->name);
72034 if (!mod->refptr) {
72035 err = -ENOMEM;
72036- goto free_init;
72037+ goto free_init_rx;
72038 }
72039 #endif
72040 /* Now we've moved module, initialize linked lists, etc. */
72041@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
72042 goto free_unload;
72043
72044 /* Set up license info based on the info section */
72045- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
72046+ set_license(mod, license);
72047
72048 /*
72049 * ndiswrapper is under GPL by itself, but loads proprietary modules.
72050@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
72051 /* Set up MODINFO_ATTR fields */
72052 setup_modinfo(mod, sechdrs, infoindex);
72053
72054+ mod->args = args;
72055+
72056+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72057+ {
72058+ char *p, *p2;
72059+
72060+ if (strstr(mod->args, "grsec_modharden_netdev")) {
72061+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
72062+ err = -EPERM;
72063+ goto cleanup;
72064+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
72065+ p += strlen("grsec_modharden_normal");
72066+ p2 = strstr(p, "_");
72067+ if (p2) {
72068+ *p2 = '\0';
72069+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
72070+ *p2 = '_';
72071+ }
72072+ err = -EPERM;
72073+ goto cleanup;
72074+ }
72075+ }
72076+#endif
72077+
72078+
72079 /* Fix up syms, so that st_value is a pointer to location. */
72080 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
72081 mod);
72082@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
72083
72084 /* Now do relocations. */
72085 for (i = 1; i < hdr->e_shnum; i++) {
72086- const char *strtab = (char *)sechdrs[strindex].sh_addr;
72087 unsigned int info = sechdrs[i].sh_info;
72088+ strtab = (char *)sechdrs[strindex].sh_addr;
72089
72090 /* Not a valid relocation section? */
72091 if (info >= hdr->e_shnum)
72092@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
72093 * Do it before processing of module parameters, so the module
72094 * can provide parameter accessor functions of its own.
72095 */
72096- if (mod->module_init)
72097- flush_icache_range((unsigned long)mod->module_init,
72098- (unsigned long)mod->module_init
72099- + mod->init_size);
72100- flush_icache_range((unsigned long)mod->module_core,
72101- (unsigned long)mod->module_core + mod->core_size);
72102+ if (mod->module_init_rx)
72103+ flush_icache_range((unsigned long)mod->module_init_rx,
72104+ (unsigned long)mod->module_init_rx
72105+ + mod->init_size_rx);
72106+ flush_icache_range((unsigned long)mod->module_core_rx,
72107+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
72108
72109 set_fs(old_fs);
72110
72111- mod->args = args;
72112 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
72113 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
72114 mod->name);
72115@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
72116 free_unload:
72117 module_unload_free(mod);
72118 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
72119+ free_init_rx:
72120 percpu_modfree(mod->refptr);
72121- free_init:
72122 #endif
72123- module_free(mod, mod->module_init);
72124- free_core:
72125- module_free(mod, mod->module_core);
72126+ module_free_exec(mod, mod->module_init_rx);
72127+ free_core_rx:
72128+ module_free_exec(mod, mod->module_core_rx);
72129+ free_init_rw:
72130+ module_free(mod, mod->module_init_rw);
72131+ free_core_rw:
72132+ module_free(mod, mod->module_core_rw);
72133 /* mod will be freed with core. Don't access it beyond this line! */
72134 free_percpu:
72135 if (percpu)
72136@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
72137 mod->symtab = mod->core_symtab;
72138 mod->strtab = mod->core_strtab;
72139 #endif
72140- module_free(mod, mod->module_init);
72141- mod->module_init = NULL;
72142- mod->init_size = 0;
72143- mod->init_text_size = 0;
72144+ module_free(mod, mod->module_init_rw);
72145+ module_free_exec(mod, mod->module_init_rx);
72146+ mod->module_init_rw = NULL;
72147+ mod->module_init_rx = NULL;
72148+ mod->init_size_rw = 0;
72149+ mod->init_size_rx = 0;
72150 mutex_unlock(&module_mutex);
72151
72152 return 0;
72153@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
72154 unsigned long nextval;
72155
72156 /* At worse, next value is at end of module */
72157- if (within_module_init(addr, mod))
72158- nextval = (unsigned long)mod->module_init+mod->init_text_size;
72159+ if (within_module_init_rx(addr, mod))
72160+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
72161+ else if (within_module_init_rw(addr, mod))
72162+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
72163+ else if (within_module_core_rx(addr, mod))
72164+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
72165+ else if (within_module_core_rw(addr, mod))
72166+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
72167 else
72168- nextval = (unsigned long)mod->module_core+mod->core_text_size;
72169+ return NULL;
72170
72171 /* Scan for closest preceeding symbol, and next symbol. (ELF
72172 starts real symbols at 1). */
72173@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
72174 char buf[8];
72175
72176 seq_printf(m, "%s %u",
72177- mod->name, mod->init_size + mod->core_size);
72178+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
72179 print_unload_info(m, mod);
72180
72181 /* Informative for users. */
72182@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
72183 mod->state == MODULE_STATE_COMING ? "Loading":
72184 "Live");
72185 /* Used by oprofile and other similar tools. */
72186- seq_printf(m, " 0x%p", mod->module_core);
72187+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
72188
72189 /* Taints info */
72190 if (mod->taints)
72191@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
72192
72193 static int __init proc_modules_init(void)
72194 {
72195+#ifndef CONFIG_GRKERNSEC_HIDESYM
72196+#ifdef CONFIG_GRKERNSEC_PROC_USER
72197+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72198+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72199+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
72200+#else
72201 proc_create("modules", 0, NULL, &proc_modules_operations);
72202+#endif
72203+#else
72204+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72205+#endif
72206 return 0;
72207 }
72208 module_init(proc_modules_init);
72209@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
72210 {
72211 struct module *mod;
72212
72213- if (addr < module_addr_min || addr > module_addr_max)
72214+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
72215+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
72216 return NULL;
72217
72218 list_for_each_entry_rcu(mod, &modules, list)
72219- if (within_module_core(addr, mod)
72220- || within_module_init(addr, mod))
72221+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
72222 return mod;
72223 return NULL;
72224 }
72225@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
72226 */
72227 struct module *__module_text_address(unsigned long addr)
72228 {
72229- struct module *mod = __module_address(addr);
72230+ struct module *mod;
72231+
72232+#ifdef CONFIG_X86_32
72233+ addr = ktla_ktva(addr);
72234+#endif
72235+
72236+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
72237+ return NULL;
72238+
72239+ mod = __module_address(addr);
72240+
72241 if (mod) {
72242 /* Make sure it's within the text section. */
72243- if (!within(addr, mod->module_init, mod->init_text_size)
72244- && !within(addr, mod->module_core, mod->core_text_size))
72245+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
72246 mod = NULL;
72247 }
72248 return mod;
72249diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
72250index ec815a9..fe46e99 100644
72251--- a/kernel/mutex-debug.c
72252+++ b/kernel/mutex-debug.c
72253@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
72254 }
72255
72256 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72257- struct thread_info *ti)
72258+ struct task_struct *task)
72259 {
72260 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
72261
72262 /* Mark the current thread as blocked on the lock: */
72263- ti->task->blocked_on = waiter;
72264+ task->blocked_on = waiter;
72265 }
72266
72267 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72268- struct thread_info *ti)
72269+ struct task_struct *task)
72270 {
72271 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
72272- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
72273- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
72274- ti->task->blocked_on = NULL;
72275+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
72276+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
72277+ task->blocked_on = NULL;
72278
72279 list_del_init(&waiter->list);
72280 waiter->task = NULL;
72281@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
72282 return;
72283
72284 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
72285- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
72286+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
72287 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
72288 mutex_clear_owner(lock);
72289 }
72290diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
72291index 6b2d735..372d3c4 100644
72292--- a/kernel/mutex-debug.h
72293+++ b/kernel/mutex-debug.h
72294@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
72295 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
72296 extern void debug_mutex_add_waiter(struct mutex *lock,
72297 struct mutex_waiter *waiter,
72298- struct thread_info *ti);
72299+ struct task_struct *task);
72300 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72301- struct thread_info *ti);
72302+ struct task_struct *task);
72303 extern void debug_mutex_unlock(struct mutex *lock);
72304 extern void debug_mutex_init(struct mutex *lock, const char *name,
72305 struct lock_class_key *key);
72306
72307 static inline void mutex_set_owner(struct mutex *lock)
72308 {
72309- lock->owner = current_thread_info();
72310+ lock->owner = current;
72311 }
72312
72313 static inline void mutex_clear_owner(struct mutex *lock)
72314diff --git a/kernel/mutex.c b/kernel/mutex.c
72315index f85644c..5ee9f77 100644
72316--- a/kernel/mutex.c
72317+++ b/kernel/mutex.c
72318@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72319 */
72320
72321 for (;;) {
72322- struct thread_info *owner;
72323+ struct task_struct *owner;
72324
72325 /*
72326 * If we own the BKL, then don't spin. The owner of
72327@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72328 spin_lock_mutex(&lock->wait_lock, flags);
72329
72330 debug_mutex_lock_common(lock, &waiter);
72331- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
72332+ debug_mutex_add_waiter(lock, &waiter, task);
72333
72334 /* add waiting tasks to the end of the waitqueue (FIFO): */
72335 list_add_tail(&waiter.list, &lock->wait_list);
72336@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72337 * TASK_UNINTERRUPTIBLE case.)
72338 */
72339 if (unlikely(signal_pending_state(state, task))) {
72340- mutex_remove_waiter(lock, &waiter,
72341- task_thread_info(task));
72342+ mutex_remove_waiter(lock, &waiter, task);
72343 mutex_release(&lock->dep_map, 1, ip);
72344 spin_unlock_mutex(&lock->wait_lock, flags);
72345
72346@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72347 done:
72348 lock_acquired(&lock->dep_map, ip);
72349 /* got the lock - rejoice! */
72350- mutex_remove_waiter(lock, &waiter, current_thread_info());
72351+ mutex_remove_waiter(lock, &waiter, task);
72352 mutex_set_owner(lock);
72353
72354 /* set it to 0 if there are no waiters left: */
72355diff --git a/kernel/mutex.h b/kernel/mutex.h
72356index 67578ca..4115fbf 100644
72357--- a/kernel/mutex.h
72358+++ b/kernel/mutex.h
72359@@ -19,7 +19,7 @@
72360 #ifdef CONFIG_SMP
72361 static inline void mutex_set_owner(struct mutex *lock)
72362 {
72363- lock->owner = current_thread_info();
72364+ lock->owner = current;
72365 }
72366
72367 static inline void mutex_clear_owner(struct mutex *lock)
72368diff --git a/kernel/panic.c b/kernel/panic.c
72369index 96b45d0..45c447a 100644
72370--- a/kernel/panic.c
72371+++ b/kernel/panic.c
72372@@ -352,7 +352,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
72373 const char *board;
72374
72375 printk(KERN_WARNING "------------[ cut here ]------------\n");
72376- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
72377+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
72378 board = dmi_get_system_info(DMI_PRODUCT_NAME);
72379 if (board)
72380 printk(KERN_WARNING "Hardware name: %s\n", board);
72381@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
72382 */
72383 void __stack_chk_fail(void)
72384 {
72385- panic("stack-protector: Kernel stack is corrupted in: %p\n",
72386+ dump_stack();
72387+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
72388 __builtin_return_address(0));
72389 }
72390 EXPORT_SYMBOL(__stack_chk_fail);
72391diff --git a/kernel/params.c b/kernel/params.c
72392index d656c27..21e452c 100644
72393--- a/kernel/params.c
72394+++ b/kernel/params.c
72395@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
72396 return ret;
72397 }
72398
72399-static struct sysfs_ops module_sysfs_ops = {
72400+static const struct sysfs_ops module_sysfs_ops = {
72401 .show = module_attr_show,
72402 .store = module_attr_store,
72403 };
72404@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
72405 return 0;
72406 }
72407
72408-static struct kset_uevent_ops module_uevent_ops = {
72409+static const struct kset_uevent_ops module_uevent_ops = {
72410 .filter = uevent_filter,
72411 };
72412
72413diff --git a/kernel/perf_event.c b/kernel/perf_event.c
72414index 37ebc14..9c121d9 100644
72415--- a/kernel/perf_event.c
72416+++ b/kernel/perf_event.c
72417@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
72418 */
72419 int sysctl_perf_event_sample_rate __read_mostly = 100000;
72420
72421-static atomic64_t perf_event_id;
72422+static atomic64_unchecked_t perf_event_id;
72423
72424 /*
72425 * Lock for (sysadmin-configurable) event reservations:
72426@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
72427 * In order to keep per-task stats reliable we need to flip the event
72428 * values when we flip the contexts.
72429 */
72430- value = atomic64_read(&next_event->count);
72431- value = atomic64_xchg(&event->count, value);
72432- atomic64_set(&next_event->count, value);
72433+ value = atomic64_read_unchecked(&next_event->count);
72434+ value = atomic64_xchg_unchecked(&event->count, value);
72435+ atomic64_set_unchecked(&next_event->count, value);
72436
72437 swap(event->total_time_enabled, next_event->total_time_enabled);
72438 swap(event->total_time_running, next_event->total_time_running);
72439@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
72440 update_event_times(event);
72441 }
72442
72443- return atomic64_read(&event->count);
72444+ return atomic64_read_unchecked(&event->count);
72445 }
72446
72447 /*
72448@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
72449 values[n++] = 1 + leader->nr_siblings;
72450 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72451 values[n++] = leader->total_time_enabled +
72452- atomic64_read(&leader->child_total_time_enabled);
72453+ atomic64_read_unchecked(&leader->child_total_time_enabled);
72454 }
72455 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72456 values[n++] = leader->total_time_running +
72457- atomic64_read(&leader->child_total_time_running);
72458+ atomic64_read_unchecked(&leader->child_total_time_running);
72459 }
72460
72461 size = n * sizeof(u64);
72462@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
72463 values[n++] = perf_event_read_value(event);
72464 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72465 values[n++] = event->total_time_enabled +
72466- atomic64_read(&event->child_total_time_enabled);
72467+ atomic64_read_unchecked(&event->child_total_time_enabled);
72468 }
72469 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72470 values[n++] = event->total_time_running +
72471- atomic64_read(&event->child_total_time_running);
72472+ atomic64_read_unchecked(&event->child_total_time_running);
72473 }
72474 if (read_format & PERF_FORMAT_ID)
72475 values[n++] = primary_event_id(event);
72476@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
72477 static void perf_event_reset(struct perf_event *event)
72478 {
72479 (void)perf_event_read(event);
72480- atomic64_set(&event->count, 0);
72481+ atomic64_set_unchecked(&event->count, 0);
72482 perf_event_update_userpage(event);
72483 }
72484
72485@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
72486 ++userpg->lock;
72487 barrier();
72488 userpg->index = perf_event_index(event);
72489- userpg->offset = atomic64_read(&event->count);
72490+ userpg->offset = atomic64_read_unchecked(&event->count);
72491 if (event->state == PERF_EVENT_STATE_ACTIVE)
72492- userpg->offset -= atomic64_read(&event->hw.prev_count);
72493+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
72494
72495 userpg->time_enabled = event->total_time_enabled +
72496- atomic64_read(&event->child_total_time_enabled);
72497+ atomic64_read_unchecked(&event->child_total_time_enabled);
72498
72499 userpg->time_running = event->total_time_running +
72500- atomic64_read(&event->child_total_time_running);
72501+ atomic64_read_unchecked(&event->child_total_time_running);
72502
72503 barrier();
72504 ++userpg->lock;
72505@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
72506 u64 values[4];
72507 int n = 0;
72508
72509- values[n++] = atomic64_read(&event->count);
72510+ values[n++] = atomic64_read_unchecked(&event->count);
72511 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72512 values[n++] = event->total_time_enabled +
72513- atomic64_read(&event->child_total_time_enabled);
72514+ atomic64_read_unchecked(&event->child_total_time_enabled);
72515 }
72516 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72517 values[n++] = event->total_time_running +
72518- atomic64_read(&event->child_total_time_running);
72519+ atomic64_read_unchecked(&event->child_total_time_running);
72520 }
72521 if (read_format & PERF_FORMAT_ID)
72522 values[n++] = primary_event_id(event);
72523@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72524 if (leader != event)
72525 leader->pmu->read(leader);
72526
72527- values[n++] = atomic64_read(&leader->count);
72528+ values[n++] = atomic64_read_unchecked(&leader->count);
72529 if (read_format & PERF_FORMAT_ID)
72530 values[n++] = primary_event_id(leader);
72531
72532@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72533 if (sub != event)
72534 sub->pmu->read(sub);
72535
72536- values[n++] = atomic64_read(&sub->count);
72537+ values[n++] = atomic64_read_unchecked(&sub->count);
72538 if (read_format & PERF_FORMAT_ID)
72539 values[n++] = primary_event_id(sub);
72540
72541@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
72542 * need to add enough zero bytes after the string to handle
72543 * the 64bit alignment we do later.
72544 */
72545- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
72546+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
72547 if (!buf) {
72548 name = strncpy(tmp, "//enomem", sizeof(tmp));
72549 goto got_name;
72550 }
72551- name = d_path(&file->f_path, buf, PATH_MAX);
72552+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
72553 if (IS_ERR(name)) {
72554 name = strncpy(tmp, "//toolong", sizeof(tmp));
72555 goto got_name;
72556@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
72557 {
72558 struct hw_perf_event *hwc = &event->hw;
72559
72560- atomic64_add(nr, &event->count);
72561+ atomic64_add_unchecked(nr, &event->count);
72562
72563 if (!hwc->sample_period)
72564 return;
72565@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
72566 u64 now;
72567
72568 now = cpu_clock(cpu);
72569- prev = atomic64_read(&event->hw.prev_count);
72570- atomic64_set(&event->hw.prev_count, now);
72571- atomic64_add(now - prev, &event->count);
72572+ prev = atomic64_read_unchecked(&event->hw.prev_count);
72573+ atomic64_set_unchecked(&event->hw.prev_count, now);
72574+ atomic64_add_unchecked(now - prev, &event->count);
72575 }
72576
72577 static int cpu_clock_perf_event_enable(struct perf_event *event)
72578@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
72579 struct hw_perf_event *hwc = &event->hw;
72580 int cpu = raw_smp_processor_id();
72581
72582- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
72583+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
72584 perf_swevent_start_hrtimer(event);
72585
72586 return 0;
72587@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
72588 u64 prev;
72589 s64 delta;
72590
72591- prev = atomic64_xchg(&event->hw.prev_count, now);
72592+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
72593 delta = now - prev;
72594- atomic64_add(delta, &event->count);
72595+ atomic64_add_unchecked(delta, &event->count);
72596 }
72597
72598 static int task_clock_perf_event_enable(struct perf_event *event)
72599@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
72600
72601 now = event->ctx->time;
72602
72603- atomic64_set(&hwc->prev_count, now);
72604+ atomic64_set_unchecked(&hwc->prev_count, now);
72605
72606 perf_swevent_start_hrtimer(event);
72607
72608@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
72609 event->parent = parent_event;
72610
72611 event->ns = get_pid_ns(current->nsproxy->pid_ns);
72612- event->id = atomic64_inc_return(&perf_event_id);
72613+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
72614
72615 event->state = PERF_EVENT_STATE_INACTIVE;
72616
72617@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
72618 if (child_event->attr.inherit_stat)
72619 perf_event_read_event(child_event, child);
72620
72621- child_val = atomic64_read(&child_event->count);
72622+ child_val = atomic64_read_unchecked(&child_event->count);
72623
72624 /*
72625 * Add back the child's count to the parent's count:
72626 */
72627- atomic64_add(child_val, &parent_event->count);
72628- atomic64_add(child_event->total_time_enabled,
72629+ atomic64_add_unchecked(child_val, &parent_event->count);
72630+ atomic64_add_unchecked(child_event->total_time_enabled,
72631 &parent_event->child_total_time_enabled);
72632- atomic64_add(child_event->total_time_running,
72633+ atomic64_add_unchecked(child_event->total_time_running,
72634 &parent_event->child_total_time_running);
72635
72636 /*
72637diff --git a/kernel/pid.c b/kernel/pid.c
72638index fce7198..4f23a7e 100644
72639--- a/kernel/pid.c
72640+++ b/kernel/pid.c
72641@@ -33,6 +33,7 @@
72642 #include <linux/rculist.h>
72643 #include <linux/bootmem.h>
72644 #include <linux/hash.h>
72645+#include <linux/security.h>
72646 #include <linux/pid_namespace.h>
72647 #include <linux/init_task.h>
72648 #include <linux/syscalls.h>
72649@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
72650
72651 int pid_max = PID_MAX_DEFAULT;
72652
72653-#define RESERVED_PIDS 300
72654+#define RESERVED_PIDS 500
72655
72656 int pid_max_min = RESERVED_PIDS + 1;
72657 int pid_max_max = PID_MAX_LIMIT;
72658@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
72659 */
72660 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
72661 {
72662- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72663+ struct task_struct *task;
72664+
72665+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72666+
72667+ if (gr_pid_is_chrooted(task))
72668+ return NULL;
72669+
72670+ return task;
72671 }
72672
72673 struct task_struct *find_task_by_vpid(pid_t vnr)
72674@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
72675 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
72676 }
72677
72678+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
72679+{
72680+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
72681+}
72682+
72683 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
72684 {
72685 struct pid *pid;
72686diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
72687index 5c9dc22..d271117 100644
72688--- a/kernel/posix-cpu-timers.c
72689+++ b/kernel/posix-cpu-timers.c
72690@@ -6,6 +6,7 @@
72691 #include <linux/posix-timers.h>
72692 #include <linux/errno.h>
72693 #include <linux/math64.h>
72694+#include <linux/security.h>
72695 #include <asm/uaccess.h>
72696 #include <linux/kernel_stat.h>
72697 #include <trace/events/timer.h>
72698@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
72699
72700 static __init int init_posix_cpu_timers(void)
72701 {
72702- struct k_clock process = {
72703+ static struct k_clock process = {
72704 .clock_getres = process_cpu_clock_getres,
72705 .clock_get = process_cpu_clock_get,
72706 .clock_set = do_posix_clock_nosettime,
72707@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
72708 .nsleep = process_cpu_nsleep,
72709 .nsleep_restart = process_cpu_nsleep_restart,
72710 };
72711- struct k_clock thread = {
72712+ static struct k_clock thread = {
72713 .clock_getres = thread_cpu_clock_getres,
72714 .clock_get = thread_cpu_clock_get,
72715 .clock_set = do_posix_clock_nosettime,
72716diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
72717index 5e76d22..cf1baeb 100644
72718--- a/kernel/posix-timers.c
72719+++ b/kernel/posix-timers.c
72720@@ -42,6 +42,7 @@
72721 #include <linux/compiler.h>
72722 #include <linux/idr.h>
72723 #include <linux/posix-timers.h>
72724+#include <linux/grsecurity.h>
72725 #include <linux/syscalls.h>
72726 #include <linux/wait.h>
72727 #include <linux/workqueue.h>
72728@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
72729 * which we beg off on and pass to do_sys_settimeofday().
72730 */
72731
72732-static struct k_clock posix_clocks[MAX_CLOCKS];
72733+static struct k_clock *posix_clocks[MAX_CLOCKS];
72734
72735 /*
72736 * These ones are defined below.
72737@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
72738 */
72739 #define CLOCK_DISPATCH(clock, call, arglist) \
72740 ((clock) < 0 ? posix_cpu_##call arglist : \
72741- (posix_clocks[clock].call != NULL \
72742- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
72743+ (posix_clocks[clock]->call != NULL \
72744+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
72745
72746 /*
72747 * Default clock hook functions when the struct k_clock passed
72748@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
72749 struct timespec *tp)
72750 {
72751 tp->tv_sec = 0;
72752- tp->tv_nsec = posix_clocks[which_clock].res;
72753+ tp->tv_nsec = posix_clocks[which_clock]->res;
72754 return 0;
72755 }
72756
72757@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
72758 return 0;
72759 if ((unsigned) which_clock >= MAX_CLOCKS)
72760 return 1;
72761- if (posix_clocks[which_clock].clock_getres != NULL)
72762+ if (posix_clocks[which_clock] == NULL)
72763 return 0;
72764- if (posix_clocks[which_clock].res != 0)
72765+ if (posix_clocks[which_clock]->clock_getres != NULL)
72766+ return 0;
72767+ if (posix_clocks[which_clock]->res != 0)
72768 return 0;
72769 return 1;
72770 }
72771@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
72772 */
72773 static __init int init_posix_timers(void)
72774 {
72775- struct k_clock clock_realtime = {
72776+ static struct k_clock clock_realtime = {
72777 .clock_getres = hrtimer_get_res,
72778 };
72779- struct k_clock clock_monotonic = {
72780+ static struct k_clock clock_monotonic = {
72781 .clock_getres = hrtimer_get_res,
72782 .clock_get = posix_ktime_get_ts,
72783 .clock_set = do_posix_clock_nosettime,
72784 };
72785- struct k_clock clock_monotonic_raw = {
72786+ static struct k_clock clock_monotonic_raw = {
72787 .clock_getres = hrtimer_get_res,
72788 .clock_get = posix_get_monotonic_raw,
72789 .clock_set = do_posix_clock_nosettime,
72790 .timer_create = no_timer_create,
72791 .nsleep = no_nsleep,
72792 };
72793- struct k_clock clock_realtime_coarse = {
72794+ static struct k_clock clock_realtime_coarse = {
72795 .clock_getres = posix_get_coarse_res,
72796 .clock_get = posix_get_realtime_coarse,
72797 .clock_set = do_posix_clock_nosettime,
72798 .timer_create = no_timer_create,
72799 .nsleep = no_nsleep,
72800 };
72801- struct k_clock clock_monotonic_coarse = {
72802+ static struct k_clock clock_monotonic_coarse = {
72803 .clock_getres = posix_get_coarse_res,
72804 .clock_get = posix_get_monotonic_coarse,
72805 .clock_set = do_posix_clock_nosettime,
72806@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
72807 .nsleep = no_nsleep,
72808 };
72809
72810+ pax_track_stack();
72811+
72812 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
72813 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
72814 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
72815@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
72816 return;
72817 }
72818
72819- posix_clocks[clock_id] = *new_clock;
72820+ posix_clocks[clock_id] = new_clock;
72821 }
72822 EXPORT_SYMBOL_GPL(register_posix_clock);
72823
72824@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
72825 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
72826 return -EFAULT;
72827
72828+ /* only the CLOCK_REALTIME clock can be set, all other clocks
72829+ have their clock_set fptr set to a nosettime dummy function
72830+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
72831+ call common_clock_set, which calls do_sys_settimeofday, which
72832+ we hook
72833+ */
72834+
72835 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
72836 }
72837
72838diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
72839index 04a9e90..bc355aa 100644
72840--- a/kernel/power/hibernate.c
72841+++ b/kernel/power/hibernate.c
72842@@ -48,14 +48,14 @@ enum {
72843
72844 static int hibernation_mode = HIBERNATION_SHUTDOWN;
72845
72846-static struct platform_hibernation_ops *hibernation_ops;
72847+static const struct platform_hibernation_ops *hibernation_ops;
72848
72849 /**
72850 * hibernation_set_ops - set the global hibernate operations
72851 * @ops: the hibernation operations to use in subsequent hibernation transitions
72852 */
72853
72854-void hibernation_set_ops(struct platform_hibernation_ops *ops)
72855+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
72856 {
72857 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
72858 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
72859diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
72860index e8b3370..484c2e4 100644
72861--- a/kernel/power/poweroff.c
72862+++ b/kernel/power/poweroff.c
72863@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
72864 .enable_mask = SYSRQ_ENABLE_BOOT,
72865 };
72866
72867-static int pm_sysrq_init(void)
72868+static int __init pm_sysrq_init(void)
72869 {
72870 register_sysrq_key('o', &sysrq_poweroff_op);
72871 return 0;
72872diff --git a/kernel/power/process.c b/kernel/power/process.c
72873index e7cd671..56d5f459 100644
72874--- a/kernel/power/process.c
72875+++ b/kernel/power/process.c
72876@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
72877 struct timeval start, end;
72878 u64 elapsed_csecs64;
72879 unsigned int elapsed_csecs;
72880+ bool timedout = false;
72881
72882 do_gettimeofday(&start);
72883
72884 end_time = jiffies + TIMEOUT;
72885 do {
72886 todo = 0;
72887+ if (time_after(jiffies, end_time))
72888+ timedout = true;
72889 read_lock(&tasklist_lock);
72890 do_each_thread(g, p) {
72891 if (frozen(p) || !freezeable(p))
72892@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
72893 * It is "frozen enough". If the task does wake
72894 * up, it will immediately call try_to_freeze.
72895 */
72896- if (!task_is_stopped_or_traced(p) &&
72897- !freezer_should_skip(p))
72898+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
72899 todo++;
72900+ if (timedout) {
72901+ printk(KERN_ERR "Task refusing to freeze:\n");
72902+ sched_show_task(p);
72903+ }
72904+ }
72905 } while_each_thread(g, p);
72906 read_unlock(&tasklist_lock);
72907 yield(); /* Yield is okay here */
72908- if (time_after(jiffies, end_time))
72909- break;
72910- } while (todo);
72911+ } while (todo && !timedout);
72912
72913 do_gettimeofday(&end);
72914 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
72915diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
72916index 40dd021..fb30ceb 100644
72917--- a/kernel/power/suspend.c
72918+++ b/kernel/power/suspend.c
72919@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
72920 [PM_SUSPEND_MEM] = "mem",
72921 };
72922
72923-static struct platform_suspend_ops *suspend_ops;
72924+static const struct platform_suspend_ops *suspend_ops;
72925
72926 /**
72927 * suspend_set_ops - Set the global suspend method table.
72928 * @ops: Pointer to ops structure.
72929 */
72930-void suspend_set_ops(struct platform_suspend_ops *ops)
72931+void suspend_set_ops(const struct platform_suspend_ops *ops)
72932 {
72933 mutex_lock(&pm_mutex);
72934 suspend_ops = ops;
72935diff --git a/kernel/printk.c b/kernel/printk.c
72936index 4cade47..637e78a 100644
72937--- a/kernel/printk.c
72938+++ b/kernel/printk.c
72939@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf, int len)
72940 char c;
72941 int error = 0;
72942
72943+#ifdef CONFIG_GRKERNSEC_DMESG
72944+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
72945+ return -EPERM;
72946+#endif
72947+
72948 error = security_syslog(type);
72949 if (error)
72950 return error;
72951diff --git a/kernel/profile.c b/kernel/profile.c
72952index dfadc5b..7f59404 100644
72953--- a/kernel/profile.c
72954+++ b/kernel/profile.c
72955@@ -39,7 +39,7 @@ struct profile_hit {
72956 /* Oprofile timer tick hook */
72957 static int (*timer_hook)(struct pt_regs *) __read_mostly;
72958
72959-static atomic_t *prof_buffer;
72960+static atomic_unchecked_t *prof_buffer;
72961 static unsigned long prof_len, prof_shift;
72962
72963 int prof_on __read_mostly;
72964@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
72965 hits[i].pc = 0;
72966 continue;
72967 }
72968- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
72969+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
72970 hits[i].hits = hits[i].pc = 0;
72971 }
72972 }
72973@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
72974 * Add the current hit(s) and flush the write-queue out
72975 * to the global buffer:
72976 */
72977- atomic_add(nr_hits, &prof_buffer[pc]);
72978+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
72979 for (i = 0; i < NR_PROFILE_HIT; ++i) {
72980- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
72981+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
72982 hits[i].pc = hits[i].hits = 0;
72983 }
72984 out:
72985@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
72986 if (prof_on != type || !prof_buffer)
72987 return;
72988 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
72989- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
72990+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
72991 }
72992 #endif /* !CONFIG_SMP */
72993 EXPORT_SYMBOL_GPL(profile_hits);
72994@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
72995 return -EFAULT;
72996 buf++; p++; count--; read++;
72997 }
72998- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
72999+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
73000 if (copy_to_user(buf, (void *)pnt, count))
73001 return -EFAULT;
73002 read += count;
73003@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
73004 }
73005 #endif
73006 profile_discard_flip_buffers();
73007- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
73008+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
73009 return count;
73010 }
73011
73012diff --git a/kernel/ptrace.c b/kernel/ptrace.c
73013index 05625f6..733bf70 100644
73014--- a/kernel/ptrace.c
73015+++ b/kernel/ptrace.c
73016@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
73017 return ret;
73018 }
73019
73020-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73021+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
73022+ unsigned int log)
73023 {
73024 const struct cred *cred = current_cred(), *tcred;
73025
73026@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73027 cred->gid != tcred->egid ||
73028 cred->gid != tcred->sgid ||
73029 cred->gid != tcred->gid) &&
73030- !capable(CAP_SYS_PTRACE)) {
73031+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73032+ (log && !capable(CAP_SYS_PTRACE)))
73033+ ) {
73034 rcu_read_unlock();
73035 return -EPERM;
73036 }
73037@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73038 smp_rmb();
73039 if (task->mm)
73040 dumpable = get_dumpable(task->mm);
73041- if (!dumpable && !capable(CAP_SYS_PTRACE))
73042+ if (!dumpable &&
73043+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73044+ (log && !capable(CAP_SYS_PTRACE))))
73045 return -EPERM;
73046
73047 return security_ptrace_access_check(task, mode);
73048@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
73049 {
73050 int err;
73051 task_lock(task);
73052- err = __ptrace_may_access(task, mode);
73053+ err = __ptrace_may_access(task, mode, 0);
73054+ task_unlock(task);
73055+ return !err;
73056+}
73057+
73058+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
73059+{
73060+ int err;
73061+ task_lock(task);
73062+ err = __ptrace_may_access(task, mode, 1);
73063 task_unlock(task);
73064 return !err;
73065 }
73066@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
73067 goto out;
73068
73069 task_lock(task);
73070- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
73071+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
73072 task_unlock(task);
73073 if (retval)
73074 goto unlock_creds;
73075@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
73076 goto unlock_tasklist;
73077
73078 task->ptrace = PT_PTRACED;
73079- if (capable(CAP_SYS_PTRACE))
73080+ if (capable_nolog(CAP_SYS_PTRACE))
73081 task->ptrace |= PT_PTRACE_CAP;
73082
73083 __ptrace_link(task, current);
73084@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
73085 {
73086 int copied = 0;
73087
73088+ pax_track_stack();
73089+
73090 while (len > 0) {
73091 char buf[128];
73092 int this_len, retval;
73093@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
73094 {
73095 int copied = 0;
73096
73097+ pax_track_stack();
73098+
73099 while (len > 0) {
73100 char buf[128];
73101 int this_len, retval;
73102@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
73103 int ret = -EIO;
73104 siginfo_t siginfo;
73105
73106+ pax_track_stack();
73107+
73108 switch (request) {
73109 case PTRACE_PEEKTEXT:
73110 case PTRACE_PEEKDATA:
73111@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
73112 ret = ptrace_setoptions(child, data);
73113 break;
73114 case PTRACE_GETEVENTMSG:
73115- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
73116+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
73117 break;
73118
73119 case PTRACE_GETSIGINFO:
73120 ret = ptrace_getsiginfo(child, &siginfo);
73121 if (!ret)
73122- ret = copy_siginfo_to_user((siginfo_t __user *) data,
73123+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
73124 &siginfo);
73125 break;
73126
73127 case PTRACE_SETSIGINFO:
73128- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
73129+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
73130 sizeof siginfo))
73131 ret = -EFAULT;
73132 else
73133@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
73134 goto out;
73135 }
73136
73137+ if (gr_handle_ptrace(child, request)) {
73138+ ret = -EPERM;
73139+ goto out_put_task_struct;
73140+ }
73141+
73142 if (request == PTRACE_ATTACH) {
73143 ret = ptrace_attach(child);
73144 /*
73145 * Some architectures need to do book-keeping after
73146 * a ptrace attach.
73147 */
73148- if (!ret)
73149+ if (!ret) {
73150 arch_ptrace_attach(child);
73151+ gr_audit_ptrace(child);
73152+ }
73153 goto out_put_task_struct;
73154 }
73155
73156@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
73157 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
73158 if (copied != sizeof(tmp))
73159 return -EIO;
73160- return put_user(tmp, (unsigned long __user *)data);
73161+ return put_user(tmp, (__force unsigned long __user *)data);
73162 }
73163
73164 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
73165@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
73166 siginfo_t siginfo;
73167 int ret;
73168
73169+ pax_track_stack();
73170+
73171 switch (request) {
73172 case PTRACE_PEEKTEXT:
73173 case PTRACE_PEEKDATA:
73174@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
73175 goto out;
73176 }
73177
73178+ if (gr_handle_ptrace(child, request)) {
73179+ ret = -EPERM;
73180+ goto out_put_task_struct;
73181+ }
73182+
73183 if (request == PTRACE_ATTACH) {
73184 ret = ptrace_attach(child);
73185 /*
73186 * Some architectures need to do book-keeping after
73187 * a ptrace attach.
73188 */
73189- if (!ret)
73190+ if (!ret) {
73191 arch_ptrace_attach(child);
73192+ gr_audit_ptrace(child);
73193+ }
73194 goto out_put_task_struct;
73195 }
73196
73197diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
73198index 697c0a0..2402696 100644
73199--- a/kernel/rcutorture.c
73200+++ b/kernel/rcutorture.c
73201@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
73202 { 0 };
73203 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
73204 { 0 };
73205-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73206-static atomic_t n_rcu_torture_alloc;
73207-static atomic_t n_rcu_torture_alloc_fail;
73208-static atomic_t n_rcu_torture_free;
73209-static atomic_t n_rcu_torture_mberror;
73210-static atomic_t n_rcu_torture_error;
73211+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73212+static atomic_unchecked_t n_rcu_torture_alloc;
73213+static atomic_unchecked_t n_rcu_torture_alloc_fail;
73214+static atomic_unchecked_t n_rcu_torture_free;
73215+static atomic_unchecked_t n_rcu_torture_mberror;
73216+static atomic_unchecked_t n_rcu_torture_error;
73217 static long n_rcu_torture_timers;
73218 static struct list_head rcu_torture_removed;
73219 static cpumask_var_t shuffle_tmp_mask;
73220@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
73221
73222 spin_lock_bh(&rcu_torture_lock);
73223 if (list_empty(&rcu_torture_freelist)) {
73224- atomic_inc(&n_rcu_torture_alloc_fail);
73225+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
73226 spin_unlock_bh(&rcu_torture_lock);
73227 return NULL;
73228 }
73229- atomic_inc(&n_rcu_torture_alloc);
73230+ atomic_inc_unchecked(&n_rcu_torture_alloc);
73231 p = rcu_torture_freelist.next;
73232 list_del_init(p);
73233 spin_unlock_bh(&rcu_torture_lock);
73234@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
73235 static void
73236 rcu_torture_free(struct rcu_torture *p)
73237 {
73238- atomic_inc(&n_rcu_torture_free);
73239+ atomic_inc_unchecked(&n_rcu_torture_free);
73240 spin_lock_bh(&rcu_torture_lock);
73241 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
73242 spin_unlock_bh(&rcu_torture_lock);
73243@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
73244 i = rp->rtort_pipe_count;
73245 if (i > RCU_TORTURE_PIPE_LEN)
73246 i = RCU_TORTURE_PIPE_LEN;
73247- atomic_inc(&rcu_torture_wcount[i]);
73248+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73249 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73250 rp->rtort_mbtest = 0;
73251 rcu_torture_free(rp);
73252@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
73253 i = rp->rtort_pipe_count;
73254 if (i > RCU_TORTURE_PIPE_LEN)
73255 i = RCU_TORTURE_PIPE_LEN;
73256- atomic_inc(&rcu_torture_wcount[i]);
73257+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73258 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73259 rp->rtort_mbtest = 0;
73260 list_del(&rp->rtort_free);
73261@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
73262 i = old_rp->rtort_pipe_count;
73263 if (i > RCU_TORTURE_PIPE_LEN)
73264 i = RCU_TORTURE_PIPE_LEN;
73265- atomic_inc(&rcu_torture_wcount[i]);
73266+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73267 old_rp->rtort_pipe_count++;
73268 cur_ops->deferred_free(old_rp);
73269 }
73270@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
73271 return;
73272 }
73273 if (p->rtort_mbtest == 0)
73274- atomic_inc(&n_rcu_torture_mberror);
73275+ atomic_inc_unchecked(&n_rcu_torture_mberror);
73276 spin_lock(&rand_lock);
73277 cur_ops->read_delay(&rand);
73278 n_rcu_torture_timers++;
73279@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
73280 continue;
73281 }
73282 if (p->rtort_mbtest == 0)
73283- atomic_inc(&n_rcu_torture_mberror);
73284+ atomic_inc_unchecked(&n_rcu_torture_mberror);
73285 cur_ops->read_delay(&rand);
73286 preempt_disable();
73287 pipe_count = p->rtort_pipe_count;
73288@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
73289 rcu_torture_current,
73290 rcu_torture_current_version,
73291 list_empty(&rcu_torture_freelist),
73292- atomic_read(&n_rcu_torture_alloc),
73293- atomic_read(&n_rcu_torture_alloc_fail),
73294- atomic_read(&n_rcu_torture_free),
73295- atomic_read(&n_rcu_torture_mberror),
73296+ atomic_read_unchecked(&n_rcu_torture_alloc),
73297+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
73298+ atomic_read_unchecked(&n_rcu_torture_free),
73299+ atomic_read_unchecked(&n_rcu_torture_mberror),
73300 n_rcu_torture_timers);
73301- if (atomic_read(&n_rcu_torture_mberror) != 0)
73302+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
73303 cnt += sprintf(&page[cnt], " !!!");
73304 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
73305 if (i > 1) {
73306 cnt += sprintf(&page[cnt], "!!! ");
73307- atomic_inc(&n_rcu_torture_error);
73308+ atomic_inc_unchecked(&n_rcu_torture_error);
73309 WARN_ON_ONCE(1);
73310 }
73311 cnt += sprintf(&page[cnt], "Reader Pipe: ");
73312@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
73313 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
73314 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73315 cnt += sprintf(&page[cnt], " %d",
73316- atomic_read(&rcu_torture_wcount[i]));
73317+ atomic_read_unchecked(&rcu_torture_wcount[i]));
73318 }
73319 cnt += sprintf(&page[cnt], "\n");
73320 if (cur_ops->stats)
73321@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
73322
73323 if (cur_ops->cleanup)
73324 cur_ops->cleanup();
73325- if (atomic_read(&n_rcu_torture_error))
73326+ if (atomic_read_unchecked(&n_rcu_torture_error))
73327 rcu_torture_print_module_parms("End of test: FAILURE");
73328 else
73329 rcu_torture_print_module_parms("End of test: SUCCESS");
73330@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
73331
73332 rcu_torture_current = NULL;
73333 rcu_torture_current_version = 0;
73334- atomic_set(&n_rcu_torture_alloc, 0);
73335- atomic_set(&n_rcu_torture_alloc_fail, 0);
73336- atomic_set(&n_rcu_torture_free, 0);
73337- atomic_set(&n_rcu_torture_mberror, 0);
73338- atomic_set(&n_rcu_torture_error, 0);
73339+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
73340+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
73341+ atomic_set_unchecked(&n_rcu_torture_free, 0);
73342+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
73343+ atomic_set_unchecked(&n_rcu_torture_error, 0);
73344 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
73345- atomic_set(&rcu_torture_wcount[i], 0);
73346+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
73347 for_each_possible_cpu(cpu) {
73348 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73349 per_cpu(rcu_torture_count, cpu)[i] = 0;
73350diff --git a/kernel/rcutree.c b/kernel/rcutree.c
73351index 683c4f3..97f54c6 100644
73352--- a/kernel/rcutree.c
73353+++ b/kernel/rcutree.c
73354@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
73355 /*
73356 * Do softirq processing for the current CPU.
73357 */
73358-static void rcu_process_callbacks(struct softirq_action *unused)
73359+static void rcu_process_callbacks(void)
73360 {
73361 /*
73362 * Memory references from any prior RCU read-side critical sections
73363diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
73364index c03edf7..ac1b341 100644
73365--- a/kernel/rcutree_plugin.h
73366+++ b/kernel/rcutree_plugin.h
73367@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
73368 */
73369 void __rcu_read_lock(void)
73370 {
73371- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
73372+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
73373 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
73374 }
73375 EXPORT_SYMBOL_GPL(__rcu_read_lock);
73376@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
73377 struct task_struct *t = current;
73378
73379 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
73380- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
73381+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
73382 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
73383 rcu_read_unlock_special(t);
73384 }
73385diff --git a/kernel/relay.c b/kernel/relay.c
73386index 760c262..a9fd241 100644
73387--- a/kernel/relay.c
73388+++ b/kernel/relay.c
73389@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
73390 unsigned int flags,
73391 int *nonpad_ret)
73392 {
73393- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
73394+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
73395 struct rchan_buf *rbuf = in->private_data;
73396 unsigned int subbuf_size = rbuf->chan->subbuf_size;
73397 uint64_t pos = (uint64_t) *ppos;
73398@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
73399 .ops = &relay_pipe_buf_ops,
73400 .spd_release = relay_page_release,
73401 };
73402+ ssize_t ret;
73403+
73404+ pax_track_stack();
73405
73406 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
73407 return 0;
73408diff --git a/kernel/resource.c b/kernel/resource.c
73409index fb11a58..4e61ae1 100644
73410--- a/kernel/resource.c
73411+++ b/kernel/resource.c
73412@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
73413
73414 static int __init ioresources_init(void)
73415 {
73416+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73417+#ifdef CONFIG_GRKERNSEC_PROC_USER
73418+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
73419+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
73420+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73421+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
73422+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
73423+#endif
73424+#else
73425 proc_create("ioports", 0, NULL, &proc_ioports_operations);
73426 proc_create("iomem", 0, NULL, &proc_iomem_operations);
73427+#endif
73428 return 0;
73429 }
73430 __initcall(ioresources_init);
73431diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
73432index a56f629..1fc4989 100644
73433--- a/kernel/rtmutex-tester.c
73434+++ b/kernel/rtmutex-tester.c
73435@@ -21,7 +21,7 @@
73436 #define MAX_RT_TEST_MUTEXES 8
73437
73438 static spinlock_t rttest_lock;
73439-static atomic_t rttest_event;
73440+static atomic_unchecked_t rttest_event;
73441
73442 struct test_thread_data {
73443 int opcode;
73444@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73445
73446 case RTTEST_LOCKCONT:
73447 td->mutexes[td->opdata] = 1;
73448- td->event = atomic_add_return(1, &rttest_event);
73449+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73450 return 0;
73451
73452 case RTTEST_RESET:
73453@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73454 return 0;
73455
73456 case RTTEST_RESETEVENT:
73457- atomic_set(&rttest_event, 0);
73458+ atomic_set_unchecked(&rttest_event, 0);
73459 return 0;
73460
73461 default:
73462@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73463 return ret;
73464
73465 td->mutexes[id] = 1;
73466- td->event = atomic_add_return(1, &rttest_event);
73467+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73468 rt_mutex_lock(&mutexes[id]);
73469- td->event = atomic_add_return(1, &rttest_event);
73470+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73471 td->mutexes[id] = 4;
73472 return 0;
73473
73474@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73475 return ret;
73476
73477 td->mutexes[id] = 1;
73478- td->event = atomic_add_return(1, &rttest_event);
73479+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73480 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
73481- td->event = atomic_add_return(1, &rttest_event);
73482+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73483 td->mutexes[id] = ret ? 0 : 4;
73484 return ret ? -EINTR : 0;
73485
73486@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73487 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
73488 return ret;
73489
73490- td->event = atomic_add_return(1, &rttest_event);
73491+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73492 rt_mutex_unlock(&mutexes[id]);
73493- td->event = atomic_add_return(1, &rttest_event);
73494+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73495 td->mutexes[id] = 0;
73496 return 0;
73497
73498@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73499 break;
73500
73501 td->mutexes[dat] = 2;
73502- td->event = atomic_add_return(1, &rttest_event);
73503+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73504 break;
73505
73506 case RTTEST_LOCKBKL:
73507@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73508 return;
73509
73510 td->mutexes[dat] = 3;
73511- td->event = atomic_add_return(1, &rttest_event);
73512+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73513 break;
73514
73515 case RTTEST_LOCKNOWAIT:
73516@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73517 return;
73518
73519 td->mutexes[dat] = 1;
73520- td->event = atomic_add_return(1, &rttest_event);
73521+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73522 return;
73523
73524 case RTTEST_LOCKBKL:
73525diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
73526index 29bd4ba..8c5de90 100644
73527--- a/kernel/rtmutex.c
73528+++ b/kernel/rtmutex.c
73529@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
73530 */
73531 spin_lock_irqsave(&pendowner->pi_lock, flags);
73532
73533- WARN_ON(!pendowner->pi_blocked_on);
73534+ BUG_ON(!pendowner->pi_blocked_on);
73535 WARN_ON(pendowner->pi_blocked_on != waiter);
73536 WARN_ON(pendowner->pi_blocked_on->lock != lock);
73537
73538diff --git a/kernel/sched.c b/kernel/sched.c
73539index 0591df8..6e343c3 100644
73540--- a/kernel/sched.c
73541+++ b/kernel/sched.c
73542@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
73543 {
73544 unsigned long flags;
73545 struct rq *rq;
73546- int cpu = get_cpu();
73547
73548 #ifdef CONFIG_SMP
73549+ int cpu = get_cpu();
73550+
73551 rq = task_rq_lock(p, &flags);
73552 p->state = TASK_WAKING;
73553
73554@@ -5043,7 +5044,7 @@ out:
73555 * In CONFIG_NO_HZ case, the idle load balance owner will do the
73556 * rebalancing for all the cpus for whom scheduler ticks are stopped.
73557 */
73558-static void run_rebalance_domains(struct softirq_action *h)
73559+static void run_rebalance_domains(void)
73560 {
73561 int this_cpu = smp_processor_id();
73562 struct rq *this_rq = cpu_rq(this_cpu);
73563@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
73564 struct rq *rq;
73565 int cpu;
73566
73567+ pax_track_stack();
73568+
73569 need_resched:
73570 preempt_disable();
73571 cpu = smp_processor_id();
73572@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
73573 * Look out! "owner" is an entirely speculative pointer
73574 * access and not reliable.
73575 */
73576-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73577+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
73578 {
73579 unsigned int cpu;
73580 struct rq *rq;
73581@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73582 * DEBUG_PAGEALLOC could have unmapped it if
73583 * the mutex owner just released it and exited.
73584 */
73585- if (probe_kernel_address(&owner->cpu, cpu))
73586+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
73587 return 0;
73588 #else
73589- cpu = owner->cpu;
73590+ cpu = task_thread_info(owner)->cpu;
73591 #endif
73592
73593 /*
73594@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73595 /*
73596 * Is that owner really running on that cpu?
73597 */
73598- if (task_thread_info(rq->curr) != owner || need_resched())
73599+ if (rq->curr != owner || need_resched())
73600 return 0;
73601
73602 cpu_relax();
73603@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p, const int nice)
73604 /* convert nice value [19,-20] to rlimit style value [1,40] */
73605 int nice_rlim = 20 - nice;
73606
73607+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
73608+
73609 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
73610 capable(CAP_SYS_NICE));
73611 }
73612@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
73613 if (nice > 19)
73614 nice = 19;
73615
73616- if (increment < 0 && !can_nice(current, nice))
73617+ if (increment < 0 && (!can_nice(current, nice) ||
73618+ gr_handle_chroot_nice()))
73619 return -EPERM;
73620
73621 retval = security_task_setnice(current, nice);
73622@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
73623 long power;
73624 int weight;
73625
73626- WARN_ON(!sd || !sd->groups);
73627+ BUG_ON(!sd || !sd->groups);
73628
73629 if (cpu != group_first_cpu(sd->groups))
73630 return;
73631diff --git a/kernel/signal.c b/kernel/signal.c
73632index 2494827..cda80a0 100644
73633--- a/kernel/signal.c
73634+++ b/kernel/signal.c
73635@@ -41,12 +41,12 @@
73636
73637 static struct kmem_cache *sigqueue_cachep;
73638
73639-static void __user *sig_handler(struct task_struct *t, int sig)
73640+static __sighandler_t sig_handler(struct task_struct *t, int sig)
73641 {
73642 return t->sighand->action[sig - 1].sa.sa_handler;
73643 }
73644
73645-static int sig_handler_ignored(void __user *handler, int sig)
73646+static int sig_handler_ignored(__sighandler_t handler, int sig)
73647 {
73648 /* Is it explicitly or implicitly ignored? */
73649 return handler == SIG_IGN ||
73650@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
73651 static int sig_task_ignored(struct task_struct *t, int sig,
73652 int from_ancestor_ns)
73653 {
73654- void __user *handler;
73655+ __sighandler_t handler;
73656
73657 handler = sig_handler(t, sig);
73658
73659@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
73660 */
73661 user = get_uid(__task_cred(t)->user);
73662 atomic_inc(&user->sigpending);
73663+
73664+ if (!override_rlimit)
73665+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
73666 if (override_rlimit ||
73667 atomic_read(&user->sigpending) <=
73668 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
73669@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
73670
73671 int unhandled_signal(struct task_struct *tsk, int sig)
73672 {
73673- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
73674+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
73675 if (is_global_init(tsk))
73676 return 1;
73677 if (handler != SIG_IGN && handler != SIG_DFL)
73678@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
73679 }
73680 }
73681
73682+ /* allow glibc communication via tgkill to other threads in our
73683+ thread group */
73684+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
73685+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
73686+ && gr_handle_signal(t, sig))
73687+ return -EPERM;
73688+
73689 return security_task_kill(t, info, sig, 0);
73690 }
73691
73692@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73693 return send_signal(sig, info, p, 1);
73694 }
73695
73696-static int
73697+int
73698 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73699 {
73700 return send_signal(sig, info, t, 0);
73701@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73702 unsigned long int flags;
73703 int ret, blocked, ignored;
73704 struct k_sigaction *action;
73705+ int is_unhandled = 0;
73706
73707 spin_lock_irqsave(&t->sighand->siglock, flags);
73708 action = &t->sighand->action[sig-1];
73709@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73710 }
73711 if (action->sa.sa_handler == SIG_DFL)
73712 t->signal->flags &= ~SIGNAL_UNKILLABLE;
73713+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
73714+ is_unhandled = 1;
73715 ret = specific_send_sig_info(sig, info, t);
73716 spin_unlock_irqrestore(&t->sighand->siglock, flags);
73717
73718+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
73719+ normal operation */
73720+ if (is_unhandled) {
73721+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
73722+ gr_handle_crash(t, sig);
73723+ }
73724+
73725 return ret;
73726 }
73727
73728@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73729 {
73730 int ret = check_kill_permission(sig, info, p);
73731
73732- if (!ret && sig)
73733+ if (!ret && sig) {
73734 ret = do_send_sig_info(sig, info, p, true);
73735+ if (!ret)
73736+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
73737+ }
73738
73739 return ret;
73740 }
73741@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
73742 {
73743 siginfo_t info;
73744
73745+ pax_track_stack();
73746+
73747 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
73748
73749 memset(&info, 0, sizeof info);
73750@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
73751 int error = -ESRCH;
73752
73753 rcu_read_lock();
73754- p = find_task_by_vpid(pid);
73755+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73756+ /* allow glibc communication via tgkill to other threads in our
73757+ thread group */
73758+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
73759+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
73760+ p = find_task_by_vpid_unrestricted(pid);
73761+ else
73762+#endif
73763+ p = find_task_by_vpid(pid);
73764 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
73765 error = check_kill_permission(sig, info, p);
73766 /*
73767diff --git a/kernel/smp.c b/kernel/smp.c
73768index aa9cff3..631a0de 100644
73769--- a/kernel/smp.c
73770+++ b/kernel/smp.c
73771@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
73772 }
73773 EXPORT_SYMBOL(smp_call_function);
73774
73775-void ipi_call_lock(void)
73776+void ipi_call_lock(void) __acquires(call_function.lock)
73777 {
73778 spin_lock(&call_function.lock);
73779 }
73780
73781-void ipi_call_unlock(void)
73782+void ipi_call_unlock(void) __releases(call_function.lock)
73783 {
73784 spin_unlock(&call_function.lock);
73785 }
73786
73787-void ipi_call_lock_irq(void)
73788+void ipi_call_lock_irq(void) __acquires(call_function.lock)
73789 {
73790 spin_lock_irq(&call_function.lock);
73791 }
73792
73793-void ipi_call_unlock_irq(void)
73794+void ipi_call_unlock_irq(void) __releases(call_function.lock)
73795 {
73796 spin_unlock_irq(&call_function.lock);
73797 }
73798diff --git a/kernel/softirq.c b/kernel/softirq.c
73799index 04a0252..580c512 100644
73800--- a/kernel/softirq.c
73801+++ b/kernel/softirq.c
73802@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
73803
73804 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
73805
73806-char *softirq_to_name[NR_SOFTIRQS] = {
73807+const char * const softirq_to_name[NR_SOFTIRQS] = {
73808 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
73809 "TASKLET", "SCHED", "HRTIMER", "RCU"
73810 };
73811@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
73812
73813 asmlinkage void __do_softirq(void)
73814 {
73815- struct softirq_action *h;
73816+ const struct softirq_action *h;
73817 __u32 pending;
73818 int max_restart = MAX_SOFTIRQ_RESTART;
73819 int cpu;
73820@@ -233,7 +233,7 @@ restart:
73821 kstat_incr_softirqs_this_cpu(h - softirq_vec);
73822
73823 trace_softirq_entry(h, softirq_vec);
73824- h->action(h);
73825+ h->action();
73826 trace_softirq_exit(h, softirq_vec);
73827 if (unlikely(prev_count != preempt_count())) {
73828 printk(KERN_ERR "huh, entered softirq %td %s %p"
73829@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
73830 local_irq_restore(flags);
73831 }
73832
73833-void open_softirq(int nr, void (*action)(struct softirq_action *))
73834+void open_softirq(int nr, void (*action)(void))
73835 {
73836- softirq_vec[nr].action = action;
73837+ pax_open_kernel();
73838+ *(void **)&softirq_vec[nr].action = action;
73839+ pax_close_kernel();
73840 }
73841
73842 /*
73843@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
73844
73845 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
73846
73847-static void tasklet_action(struct softirq_action *a)
73848+static void tasklet_action(void)
73849 {
73850 struct tasklet_struct *list;
73851
73852@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
73853 }
73854 }
73855
73856-static void tasklet_hi_action(struct softirq_action *a)
73857+static void tasklet_hi_action(void)
73858 {
73859 struct tasklet_struct *list;
73860
73861diff --git a/kernel/sys.c b/kernel/sys.c
73862index e9512b1..3c265de 100644
73863--- a/kernel/sys.c
73864+++ b/kernel/sys.c
73865@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
73866 error = -EACCES;
73867 goto out;
73868 }
73869+
73870+ if (gr_handle_chroot_setpriority(p, niceval)) {
73871+ error = -EACCES;
73872+ goto out;
73873+ }
73874+
73875 no_nice = security_task_setnice(p, niceval);
73876 if (no_nice) {
73877 error = no_nice;
73878@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
73879 !(user = find_user(who)))
73880 goto out_unlock; /* No processes for this user */
73881
73882- do_each_thread(g, p)
73883+ do_each_thread(g, p) {
73884 if (__task_cred(p)->uid == who)
73885 error = set_one_prio(p, niceval, error);
73886- while_each_thread(g, p);
73887+ } while_each_thread(g, p);
73888 if (who != cred->uid)
73889 free_uid(user); /* For find_user() */
73890 break;
73891@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
73892 !(user = find_user(who)))
73893 goto out_unlock; /* No processes for this user */
73894
73895- do_each_thread(g, p)
73896+ do_each_thread(g, p) {
73897 if (__task_cred(p)->uid == who) {
73898 niceval = 20 - task_nice(p);
73899 if (niceval > retval)
73900 retval = niceval;
73901 }
73902- while_each_thread(g, p);
73903+ } while_each_thread(g, p);
73904 if (who != cred->uid)
73905 free_uid(user); /* for find_user() */
73906 break;
73907@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
73908 goto error;
73909 }
73910
73911+ if (gr_check_group_change(new->gid, new->egid, -1))
73912+ goto error;
73913+
73914 if (rgid != (gid_t) -1 ||
73915 (egid != (gid_t) -1 && egid != old->gid))
73916 new->sgid = new->egid;
73917@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
73918 goto error;
73919
73920 retval = -EPERM;
73921+
73922+ if (gr_check_group_change(gid, gid, gid))
73923+ goto error;
73924+
73925 if (capable(CAP_SETGID))
73926 new->gid = new->egid = new->sgid = new->fsgid = gid;
73927 else if (gid == old->gid || gid == old->sgid)
73928@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
73929 if (!new_user)
73930 return -EAGAIN;
73931
73932+ /*
73933+ * We don't fail in case of NPROC limit excess here because too many
73934+ * poorly written programs don't check set*uid() return code, assuming
73935+ * it never fails if called by root. We may still enforce NPROC limit
73936+ * for programs doing set*uid()+execve() by harmlessly deferring the
73937+ * failure to the execve() stage.
73938+ */
73939 if (atomic_read(&new_user->processes) >=
73940 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
73941- new_user != INIT_USER) {
73942- free_uid(new_user);
73943- return -EAGAIN;
73944- }
73945+ new_user != INIT_USER)
73946+ current->flags |= PF_NPROC_EXCEEDED;
73947+ else
73948+ current->flags &= ~PF_NPROC_EXCEEDED;
73949
73950 free_uid(new->user);
73951 new->user = new_user;
73952@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
73953 goto error;
73954 }
73955
73956+ if (gr_check_user_change(new->uid, new->euid, -1))
73957+ goto error;
73958+
73959 if (new->uid != old->uid) {
73960 retval = set_user(new);
73961 if (retval < 0)
73962@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
73963 goto error;
73964
73965 retval = -EPERM;
73966+
73967+ if (gr_check_crash_uid(uid))
73968+ goto error;
73969+ if (gr_check_user_change(uid, uid, uid))
73970+ goto error;
73971+
73972 if (capable(CAP_SETUID)) {
73973 new->suid = new->uid = uid;
73974 if (uid != old->uid) {
73975@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
73976 goto error;
73977 }
73978
73979+ if (gr_check_user_change(ruid, euid, -1))
73980+ goto error;
73981+
73982 if (ruid != (uid_t) -1) {
73983 new->uid = ruid;
73984 if (ruid != old->uid) {
73985@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
73986 goto error;
73987 }
73988
73989+ if (gr_check_group_change(rgid, egid, -1))
73990+ goto error;
73991+
73992 if (rgid != (gid_t) -1)
73993 new->gid = rgid;
73994 if (egid != (gid_t) -1)
73995@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
73996 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
73997 goto error;
73998
73999+ if (gr_check_user_change(-1, -1, uid))
74000+ goto error;
74001+
74002 if (uid == old->uid || uid == old->euid ||
74003 uid == old->suid || uid == old->fsuid ||
74004 capable(CAP_SETUID)) {
74005@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
74006 if (gid == old->gid || gid == old->egid ||
74007 gid == old->sgid || gid == old->fsgid ||
74008 capable(CAP_SETGID)) {
74009+ if (gr_check_group_change(-1, -1, gid))
74010+ goto error;
74011+
74012 if (gid != old_fsgid) {
74013 new->fsgid = gid;
74014 goto change_okay;
74015@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
74016 error = get_dumpable(me->mm);
74017 break;
74018 case PR_SET_DUMPABLE:
74019- if (arg2 < 0 || arg2 > 1) {
74020+ if (arg2 > 1) {
74021 error = -EINVAL;
74022 break;
74023 }
74024diff --git a/kernel/sysctl.c b/kernel/sysctl.c
74025index b8bd058..ab6a76be 100644
74026--- a/kernel/sysctl.c
74027+++ b/kernel/sysctl.c
74028@@ -63,6 +63,13 @@
74029 static int deprecated_sysctl_warning(struct __sysctl_args *args);
74030
74031 #if defined(CONFIG_SYSCTL)
74032+#include <linux/grsecurity.h>
74033+#include <linux/grinternal.h>
74034+
74035+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
74036+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74037+ const int op);
74038+extern int gr_handle_chroot_sysctl(const int op);
74039
74040 /* External variables not in a header file. */
74041 extern int C_A_D;
74042@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
74043 static int proc_taint(struct ctl_table *table, int write,
74044 void __user *buffer, size_t *lenp, loff_t *ppos);
74045 #endif
74046+extern ctl_table grsecurity_table[];
74047
74048 static struct ctl_table root_table[];
74049 static struct ctl_table_root sysctl_table_root;
74050@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
74051 int sysctl_legacy_va_layout;
74052 #endif
74053
74054+#ifdef CONFIG_PAX_SOFTMODE
74055+static ctl_table pax_table[] = {
74056+ {
74057+ .ctl_name = CTL_UNNUMBERED,
74058+ .procname = "softmode",
74059+ .data = &pax_softmode,
74060+ .maxlen = sizeof(unsigned int),
74061+ .mode = 0600,
74062+ .proc_handler = &proc_dointvec,
74063+ },
74064+
74065+ { .ctl_name = 0 }
74066+};
74067+#endif
74068+
74069 extern int prove_locking;
74070 extern int lock_stat;
74071
74072@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
74073 #endif
74074
74075 static struct ctl_table kern_table[] = {
74076+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
74077+ {
74078+ .ctl_name = CTL_UNNUMBERED,
74079+ .procname = "grsecurity",
74080+ .mode = 0500,
74081+ .child = grsecurity_table,
74082+ },
74083+#endif
74084+
74085+#ifdef CONFIG_PAX_SOFTMODE
74086+ {
74087+ .ctl_name = CTL_UNNUMBERED,
74088+ .procname = "pax",
74089+ .mode = 0500,
74090+ .child = pax_table,
74091+ },
74092+#endif
74093+
74094 {
74095 .ctl_name = CTL_UNNUMBERED,
74096 .procname = "sched_child_runs_first",
74097@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
74098 .data = &modprobe_path,
74099 .maxlen = KMOD_PATH_LEN,
74100 .mode = 0644,
74101- .proc_handler = &proc_dostring,
74102- .strategy = &sysctl_string,
74103+ .proc_handler = &proc_dostring_modpriv,
74104+ .strategy = &sysctl_string_modpriv,
74105 },
74106 {
74107 .ctl_name = CTL_UNNUMBERED,
74108@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
74109 .mode = 0644,
74110 .proc_handler = &proc_dointvec
74111 },
74112+ {
74113+ .procname = "heap_stack_gap",
74114+ .data = &sysctl_heap_stack_gap,
74115+ .maxlen = sizeof(sysctl_heap_stack_gap),
74116+ .mode = 0644,
74117+ .proc_handler = proc_doulongvec_minmax,
74118+ },
74119 #else
74120 {
74121 .ctl_name = CTL_UNNUMBERED,
74122@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
74123 return 0;
74124 }
74125
74126+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
74127+
74128 static int parse_table(int __user *name, int nlen,
74129 void __user *oldval, size_t __user *oldlenp,
74130 void __user *newval, size_t newlen,
74131@@ -1821,7 +1871,7 @@ repeat:
74132 if (n == table->ctl_name) {
74133 int error;
74134 if (table->child) {
74135- if (sysctl_perm(root, table, MAY_EXEC))
74136+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
74137 return -EPERM;
74138 name++;
74139 nlen--;
74140@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
74141 int error;
74142 int mode;
74143
74144+ if (table->parent != NULL && table->parent->procname != NULL &&
74145+ table->procname != NULL &&
74146+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
74147+ return -EACCES;
74148+ if (gr_handle_chroot_sysctl(op))
74149+ return -EACCES;
74150+ error = gr_handle_sysctl(table, op);
74151+ if (error)
74152+ return error;
74153+
74154+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74155+ if (error)
74156+ return error;
74157+
74158+ if (root->permissions)
74159+ mode = root->permissions(root, current->nsproxy, table);
74160+ else
74161+ mode = table->mode;
74162+
74163+ return test_perm(mode, op);
74164+}
74165+
74166+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
74167+{
74168+ int error;
74169+ int mode;
74170+
74171 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74172 if (error)
74173 return error;
74174@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
74175 buffer, lenp, ppos);
74176 }
74177
74178+int proc_dostring_modpriv(struct ctl_table *table, int write,
74179+ void __user *buffer, size_t *lenp, loff_t *ppos)
74180+{
74181+ if (write && !capable(CAP_SYS_MODULE))
74182+ return -EPERM;
74183+
74184+ return _proc_do_string(table->data, table->maxlen, write,
74185+ buffer, lenp, ppos);
74186+}
74187+
74188
74189 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
74190 int *valp,
74191@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
74192 vleft = table->maxlen / sizeof(unsigned long);
74193 left = *lenp;
74194
74195- for (; left && vleft--; i++, min++, max++, first=0) {
74196+ for (; left && vleft--; i++, first=0) {
74197 if (write) {
74198 while (left) {
74199 char c;
74200@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
74201 return -ENOSYS;
74202 }
74203
74204+int proc_dostring_modpriv(struct ctl_table *table, int write,
74205+ void __user *buffer, size_t *lenp, loff_t *ppos)
74206+{
74207+ return -ENOSYS;
74208+}
74209+
74210 int proc_dointvec(struct ctl_table *table, int write,
74211 void __user *buffer, size_t *lenp, loff_t *ppos)
74212 {
74213@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
74214 return 1;
74215 }
74216
74217+int sysctl_string_modpriv(struct ctl_table *table,
74218+ void __user *oldval, size_t __user *oldlenp,
74219+ void __user *newval, size_t newlen)
74220+{
74221+ if (newval && newlen && !capable(CAP_SYS_MODULE))
74222+ return -EPERM;
74223+
74224+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
74225+}
74226+
74227 /*
74228 * This function makes sure that all of the integers in the vector
74229 * are between the minimum and maximum values given in the arrays
74230@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
74231 return -ENOSYS;
74232 }
74233
74234+int sysctl_string_modpriv(struct ctl_table *table,
74235+ void __user *oldval, size_t __user *oldlenp,
74236+ void __user *newval, size_t newlen)
74237+{
74238+ return -ENOSYS;
74239+}
74240+
74241 int sysctl_intvec(struct ctl_table *table,
74242 void __user *oldval, size_t __user *oldlenp,
74243 void __user *newval, size_t newlen)
74244@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
74245 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
74246 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
74247 EXPORT_SYMBOL(proc_dostring);
74248+EXPORT_SYMBOL(proc_dostring_modpriv);
74249 EXPORT_SYMBOL(proc_doulongvec_minmax);
74250 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
74251 EXPORT_SYMBOL(register_sysctl_table);
74252@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
74253 EXPORT_SYMBOL(sysctl_jiffies);
74254 EXPORT_SYMBOL(sysctl_ms_jiffies);
74255 EXPORT_SYMBOL(sysctl_string);
74256+EXPORT_SYMBOL(sysctl_string_modpriv);
74257 EXPORT_SYMBOL(sysctl_data);
74258 EXPORT_SYMBOL(unregister_sysctl_table);
74259diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
74260index 469193c..ea3ecb2 100644
74261--- a/kernel/sysctl_check.c
74262+++ b/kernel/sysctl_check.c
74263@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
74264 } else {
74265 if ((table->strategy == sysctl_data) ||
74266 (table->strategy == sysctl_string) ||
74267+ (table->strategy == sysctl_string_modpriv) ||
74268 (table->strategy == sysctl_intvec) ||
74269 (table->strategy == sysctl_jiffies) ||
74270 (table->strategy == sysctl_ms_jiffies) ||
74271 (table->proc_handler == proc_dostring) ||
74272+ (table->proc_handler == proc_dostring_modpriv) ||
74273 (table->proc_handler == proc_dointvec) ||
74274 (table->proc_handler == proc_dointvec_minmax) ||
74275 (table->proc_handler == proc_dointvec_jiffies) ||
74276diff --git a/kernel/taskstats.c b/kernel/taskstats.c
74277index b080920..d344f89 100644
74278--- a/kernel/taskstats.c
74279+++ b/kernel/taskstats.c
74280@@ -26,9 +26,12 @@
74281 #include <linux/cgroup.h>
74282 #include <linux/fs.h>
74283 #include <linux/file.h>
74284+#include <linux/grsecurity.h>
74285 #include <net/genetlink.h>
74286 #include <asm/atomic.h>
74287
74288+extern int gr_is_taskstats_denied(int pid);
74289+
74290 /*
74291 * Maximum length of a cpumask that can be specified in
74292 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
74293@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
74294 size_t size;
74295 cpumask_var_t mask;
74296
74297+ if (gr_is_taskstats_denied(current->pid))
74298+ return -EACCES;
74299+
74300 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
74301 return -ENOMEM;
74302
74303diff --git a/kernel/time.c b/kernel/time.c
74304index 33df60e..ca768bd 100644
74305--- a/kernel/time.c
74306+++ b/kernel/time.c
74307@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
74308 return error;
74309
74310 if (tz) {
74311+ /* we log in do_settimeofday called below, so don't log twice
74312+ */
74313+ if (!tv)
74314+ gr_log_timechange();
74315+
74316 /* SMP safe, global irq locking makes it work. */
74317 sys_tz = *tz;
74318 update_vsyscall_tz();
74319@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
74320 * Avoid unnecessary multiplications/divisions in the
74321 * two most common HZ cases:
74322 */
74323-unsigned int inline jiffies_to_msecs(const unsigned long j)
74324+inline unsigned int jiffies_to_msecs(const unsigned long j)
74325 {
74326 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
74327 return (MSEC_PER_SEC / HZ) * j;
74328@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
74329 }
74330 EXPORT_SYMBOL(jiffies_to_msecs);
74331
74332-unsigned int inline jiffies_to_usecs(const unsigned long j)
74333+inline unsigned int jiffies_to_usecs(const unsigned long j)
74334 {
74335 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
74336 return (USEC_PER_SEC / HZ) * j;
74337diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
74338index 8917fd3..5f0ead6 100644
74339--- a/kernel/time/tick-broadcast.c
74340+++ b/kernel/time/tick-broadcast.c
74341@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
74342 * then clear the broadcast bit.
74343 */
74344 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
74345- int cpu = smp_processor_id();
74346+ cpu = smp_processor_id();
74347
74348 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
74349 tick_broadcast_clear_oneshot(cpu);
74350diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
74351index 1d1206a..08a7c2f 100644
74352--- a/kernel/time/timekeeping.c
74353+++ b/kernel/time/timekeeping.c
74354@@ -14,6 +14,7 @@
74355 #include <linux/init.h>
74356 #include <linux/mm.h>
74357 #include <linux/sched.h>
74358+#include <linux/grsecurity.h>
74359 #include <linux/sysdev.h>
74360 #include <linux/clocksource.h>
74361 #include <linux/jiffies.h>
74362@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
74363 */
74364 struct timespec ts = xtime;
74365 timespec_add_ns(&ts, nsec);
74366- ACCESS_ONCE(xtime_cache) = ts;
74367+ ACCESS_ONCE_RW(xtime_cache) = ts;
74368 }
74369
74370 /* must hold xtime_lock */
74371@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
74372 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
74373 return -EINVAL;
74374
74375+ gr_log_timechange();
74376+
74377 write_seqlock_irqsave(&xtime_lock, flags);
74378
74379 timekeeping_forward_now();
74380diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
74381index 54c0dda..e9095d9 100644
74382--- a/kernel/time/timer_list.c
74383+++ b/kernel/time/timer_list.c
74384@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
74385
74386 static void print_name_offset(struct seq_file *m, void *sym)
74387 {
74388+#ifdef CONFIG_GRKERNSEC_HIDESYM
74389+ SEQ_printf(m, "<%p>", NULL);
74390+#else
74391 char symname[KSYM_NAME_LEN];
74392
74393 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
74394 SEQ_printf(m, "<%p>", sym);
74395 else
74396 SEQ_printf(m, "%s", symname);
74397+#endif
74398 }
74399
74400 static void
74401@@ -112,7 +116,11 @@ next_one:
74402 static void
74403 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
74404 {
74405+#ifdef CONFIG_GRKERNSEC_HIDESYM
74406+ SEQ_printf(m, " .base: %p\n", NULL);
74407+#else
74408 SEQ_printf(m, " .base: %p\n", base);
74409+#endif
74410 SEQ_printf(m, " .index: %d\n",
74411 base->index);
74412 SEQ_printf(m, " .resolution: %Lu nsecs\n",
74413@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
74414 {
74415 struct proc_dir_entry *pe;
74416
74417+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74418+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
74419+#else
74420 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
74421+#endif
74422 if (!pe)
74423 return -ENOMEM;
74424 return 0;
74425diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
74426index ee5681f..634089b 100644
74427--- a/kernel/time/timer_stats.c
74428+++ b/kernel/time/timer_stats.c
74429@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
74430 static unsigned long nr_entries;
74431 static struct entry entries[MAX_ENTRIES];
74432
74433-static atomic_t overflow_count;
74434+static atomic_unchecked_t overflow_count;
74435
74436 /*
74437 * The entries are in a hash-table, for fast lookup:
74438@@ -140,7 +140,7 @@ static void reset_entries(void)
74439 nr_entries = 0;
74440 memset(entries, 0, sizeof(entries));
74441 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
74442- atomic_set(&overflow_count, 0);
74443+ atomic_set_unchecked(&overflow_count, 0);
74444 }
74445
74446 static struct entry *alloc_entry(void)
74447@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74448 if (likely(entry))
74449 entry->count++;
74450 else
74451- atomic_inc(&overflow_count);
74452+ atomic_inc_unchecked(&overflow_count);
74453
74454 out_unlock:
74455 spin_unlock_irqrestore(lock, flags);
74456@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74457
74458 static void print_name_offset(struct seq_file *m, unsigned long addr)
74459 {
74460+#ifdef CONFIG_GRKERNSEC_HIDESYM
74461+ seq_printf(m, "<%p>", NULL);
74462+#else
74463 char symname[KSYM_NAME_LEN];
74464
74465 if (lookup_symbol_name(addr, symname) < 0)
74466 seq_printf(m, "<%p>", (void *)addr);
74467 else
74468 seq_printf(m, "%s", symname);
74469+#endif
74470 }
74471
74472 static int tstats_show(struct seq_file *m, void *v)
74473@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
74474
74475 seq_puts(m, "Timer Stats Version: v0.2\n");
74476 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
74477- if (atomic_read(&overflow_count))
74478+ if (atomic_read_unchecked(&overflow_count))
74479 seq_printf(m, "Overflow: %d entries\n",
74480- atomic_read(&overflow_count));
74481+ atomic_read_unchecked(&overflow_count));
74482
74483 for (i = 0; i < nr_entries; i++) {
74484 entry = entries + i;
74485@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
74486 {
74487 struct proc_dir_entry *pe;
74488
74489+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74490+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
74491+#else
74492 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
74493+#endif
74494 if (!pe)
74495 return -ENOMEM;
74496 return 0;
74497diff --git a/kernel/timer.c b/kernel/timer.c
74498index cb3c1f1..8bf5526 100644
74499--- a/kernel/timer.c
74500+++ b/kernel/timer.c
74501@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
74502 /*
74503 * This function runs timers and the timer-tq in bottom half context.
74504 */
74505-static void run_timer_softirq(struct softirq_action *h)
74506+static void run_timer_softirq(void)
74507 {
74508 struct tvec_base *base = __get_cpu_var(tvec_bases);
74509
74510diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
74511index d9d6206..f19467e 100644
74512--- a/kernel/trace/blktrace.c
74513+++ b/kernel/trace/blktrace.c
74514@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
74515 struct blk_trace *bt = filp->private_data;
74516 char buf[16];
74517
74518- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
74519+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
74520
74521 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
74522 }
74523@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
74524 return 1;
74525
74526 bt = buf->chan->private_data;
74527- atomic_inc(&bt->dropped);
74528+ atomic_inc_unchecked(&bt->dropped);
74529 return 0;
74530 }
74531
74532@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
74533
74534 bt->dir = dir;
74535 bt->dev = dev;
74536- atomic_set(&bt->dropped, 0);
74537+ atomic_set_unchecked(&bt->dropped, 0);
74538
74539 ret = -EIO;
74540 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
74541diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
74542index 4872937..c794d40 100644
74543--- a/kernel/trace/ftrace.c
74544+++ b/kernel/trace/ftrace.c
74545@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
74546
74547 ip = rec->ip;
74548
74549+ ret = ftrace_arch_code_modify_prepare();
74550+ FTRACE_WARN_ON(ret);
74551+ if (ret)
74552+ return 0;
74553+
74554 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
74555+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
74556 if (ret) {
74557 ftrace_bug(ret, ip);
74558 rec->flags |= FTRACE_FL_FAILED;
74559- return 0;
74560 }
74561- return 1;
74562+ return ret ? 0 : 1;
74563 }
74564
74565 /*
74566diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
74567index e749a05..19c6e94 100644
74568--- a/kernel/trace/ring_buffer.c
74569+++ b/kernel/trace/ring_buffer.c
74570@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
74571 * the reader page). But if the next page is a header page,
74572 * its flags will be non zero.
74573 */
74574-static int inline
74575+static inline int
74576 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
74577 struct buffer_page *page, struct list_head *list)
74578 {
74579diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
74580index a2a2d1f..7f32b09 100644
74581--- a/kernel/trace/trace.c
74582+++ b/kernel/trace/trace.c
74583@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
74584 size_t rem;
74585 unsigned int i;
74586
74587+ pax_track_stack();
74588+
74589 /* copy the tracer to avoid using a global lock all around */
74590 mutex_lock(&trace_types_lock);
74591 if (unlikely(old_tracer != current_trace && current_trace)) {
74592@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
74593 int entries, size, i;
74594 size_t ret;
74595
74596+ pax_track_stack();
74597+
74598 if (*ppos & (PAGE_SIZE - 1)) {
74599 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
74600 return -EINVAL;
74601@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
74602 };
74603 #endif
74604
74605-static struct dentry *d_tracer;
74606-
74607 struct dentry *tracing_init_dentry(void)
74608 {
74609+ static struct dentry *d_tracer;
74610 static int once;
74611
74612 if (d_tracer)
74613@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
74614 return d_tracer;
74615 }
74616
74617-static struct dentry *d_percpu;
74618-
74619 struct dentry *tracing_dentry_percpu(void)
74620 {
74621+ static struct dentry *d_percpu;
74622 static int once;
74623 struct dentry *d_tracer;
74624
74625diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
74626index d128f65..f37b4af 100644
74627--- a/kernel/trace/trace_events.c
74628+++ b/kernel/trace/trace_events.c
74629@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
74630 * Modules must own their file_operations to keep up with
74631 * reference counting.
74632 */
74633+
74634 struct ftrace_module_file_ops {
74635 struct list_head list;
74636 struct module *mod;
74637- struct file_operations id;
74638- struct file_operations enable;
74639- struct file_operations format;
74640- struct file_operations filter;
74641 };
74642
74643 static void remove_subsystem_dir(const char *name)
74644@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
74645
74646 file_ops->mod = mod;
74647
74648- file_ops->id = ftrace_event_id_fops;
74649- file_ops->id.owner = mod;
74650-
74651- file_ops->enable = ftrace_enable_fops;
74652- file_ops->enable.owner = mod;
74653-
74654- file_ops->filter = ftrace_event_filter_fops;
74655- file_ops->filter.owner = mod;
74656-
74657- file_ops->format = ftrace_event_format_fops;
74658- file_ops->format.owner = mod;
74659+ pax_open_kernel();
74660+ *(void **)&mod->trace_id.owner = mod;
74661+ *(void **)&mod->trace_enable.owner = mod;
74662+ *(void **)&mod->trace_filter.owner = mod;
74663+ *(void **)&mod->trace_format.owner = mod;
74664+ pax_close_kernel();
74665
74666 list_add(&file_ops->list, &ftrace_module_file_list);
74667
74668@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
74669 call->mod = mod;
74670 list_add(&call->list, &ftrace_events);
74671 event_create_dir(call, d_events,
74672- &file_ops->id, &file_ops->enable,
74673- &file_ops->filter, &file_ops->format);
74674+ &mod->trace_id, &mod->trace_enable,
74675+ &mod->trace_filter, &mod->trace_format);
74676 }
74677 }
74678
74679diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
74680index 0acd834..b800b56 100644
74681--- a/kernel/trace/trace_mmiotrace.c
74682+++ b/kernel/trace/trace_mmiotrace.c
74683@@ -23,7 +23,7 @@ struct header_iter {
74684 static struct trace_array *mmio_trace_array;
74685 static bool overrun_detected;
74686 static unsigned long prev_overruns;
74687-static atomic_t dropped_count;
74688+static atomic_unchecked_t dropped_count;
74689
74690 static void mmio_reset_data(struct trace_array *tr)
74691 {
74692@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
74693
74694 static unsigned long count_overruns(struct trace_iterator *iter)
74695 {
74696- unsigned long cnt = atomic_xchg(&dropped_count, 0);
74697+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
74698 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
74699
74700 if (over > prev_overruns)
74701@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
74702 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
74703 sizeof(*entry), 0, pc);
74704 if (!event) {
74705- atomic_inc(&dropped_count);
74706+ atomic_inc_unchecked(&dropped_count);
74707 return;
74708 }
74709 entry = ring_buffer_event_data(event);
74710@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
74711 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
74712 sizeof(*entry), 0, pc);
74713 if (!event) {
74714- atomic_inc(&dropped_count);
74715+ atomic_inc_unchecked(&dropped_count);
74716 return;
74717 }
74718 entry = ring_buffer_event_data(event);
74719diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
74720index b6c12c6..41fdc53 100644
74721--- a/kernel/trace/trace_output.c
74722+++ b/kernel/trace/trace_output.c
74723@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
74724 return 0;
74725 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
74726 if (!IS_ERR(p)) {
74727- p = mangle_path(s->buffer + s->len, p, "\n");
74728+ p = mangle_path(s->buffer + s->len, p, "\n\\");
74729 if (p) {
74730 s->len = p - s->buffer;
74731 return 1;
74732diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
74733index 8504ac7..ecf0adb 100644
74734--- a/kernel/trace/trace_stack.c
74735+++ b/kernel/trace/trace_stack.c
74736@@ -50,7 +50,7 @@ static inline void check_stack(void)
74737 return;
74738
74739 /* we do not handle interrupt stacks yet */
74740- if (!object_is_on_stack(&this_size))
74741+ if (!object_starts_on_stack(&this_size))
74742 return;
74743
74744 local_irq_save(flags);
74745diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
74746index 40cafb0..d5ead43 100644
74747--- a/kernel/trace/trace_workqueue.c
74748+++ b/kernel/trace/trace_workqueue.c
74749@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
74750 int cpu;
74751 pid_t pid;
74752 /* Can be inserted from interrupt or user context, need to be atomic */
74753- atomic_t inserted;
74754+ atomic_unchecked_t inserted;
74755 /*
74756 * Don't need to be atomic, works are serialized in a single workqueue thread
74757 * on a single CPU.
74758@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
74759 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
74760 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
74761 if (node->pid == wq_thread->pid) {
74762- atomic_inc(&node->inserted);
74763+ atomic_inc_unchecked(&node->inserted);
74764 goto found;
74765 }
74766 }
74767@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
74768 tsk = get_pid_task(pid, PIDTYPE_PID);
74769 if (tsk) {
74770 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
74771- atomic_read(&cws->inserted), cws->executed,
74772+ atomic_read_unchecked(&cws->inserted), cws->executed,
74773 tsk->comm);
74774 put_task_struct(tsk);
74775 }
74776diff --git a/kernel/user.c b/kernel/user.c
74777index 1b91701..8795237 100644
74778--- a/kernel/user.c
74779+++ b/kernel/user.c
74780@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
74781 spin_lock_irq(&uidhash_lock);
74782 up = uid_hash_find(uid, hashent);
74783 if (up) {
74784+ put_user_ns(ns);
74785 key_put(new->uid_keyring);
74786 key_put(new->session_keyring);
74787 kmem_cache_free(uid_cachep, new);
74788diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
74789index 234ceb1..ad74049 100644
74790--- a/lib/Kconfig.debug
74791+++ b/lib/Kconfig.debug
74792@@ -905,7 +905,7 @@ config LATENCYTOP
74793 select STACKTRACE
74794 select SCHEDSTATS
74795 select SCHED_DEBUG
74796- depends on HAVE_LATENCYTOP_SUPPORT
74797+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
74798 help
74799 Enable this option if you want to use the LatencyTOP tool
74800 to find out which userspace is blocking on what kernel operations.
74801diff --git a/lib/bitmap.c b/lib/bitmap.c
74802index 7025658..8d14cab 100644
74803--- a/lib/bitmap.c
74804+++ b/lib/bitmap.c
74805@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
74806 {
74807 int c, old_c, totaldigits, ndigits, nchunks, nbits;
74808 u32 chunk;
74809- const char __user *ubuf = buf;
74810+ const char __user *ubuf = (const char __force_user *)buf;
74811
74812 bitmap_zero(maskp, nmaskbits);
74813
74814@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
74815 {
74816 if (!access_ok(VERIFY_READ, ubuf, ulen))
74817 return -EFAULT;
74818- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
74819+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
74820 }
74821 EXPORT_SYMBOL(bitmap_parse_user);
74822
74823diff --git a/lib/bug.c b/lib/bug.c
74824index 300e41a..2779eb0 100644
74825--- a/lib/bug.c
74826+++ b/lib/bug.c
74827@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
74828 return BUG_TRAP_TYPE_NONE;
74829
74830 bug = find_bug(bugaddr);
74831+ if (!bug)
74832+ return BUG_TRAP_TYPE_NONE;
74833
74834 printk(KERN_EMERG "------------[ cut here ]------------\n");
74835
74836diff --git a/lib/debugobjects.c b/lib/debugobjects.c
74837index 2b413db..e21d207 100644
74838--- a/lib/debugobjects.c
74839+++ b/lib/debugobjects.c
74840@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
74841 if (limit > 4)
74842 return;
74843
74844- is_on_stack = object_is_on_stack(addr);
74845+ is_on_stack = object_starts_on_stack(addr);
74846 if (is_on_stack == onstack)
74847 return;
74848
74849diff --git a/lib/devres.c b/lib/devres.c
74850index 72c8909..7543868 100644
74851--- a/lib/devres.c
74852+++ b/lib/devres.c
74853@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
74854 {
74855 iounmap(addr);
74856 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
74857- (void *)addr));
74858+ (void __force *)addr));
74859 }
74860 EXPORT_SYMBOL(devm_iounmap);
74861
74862@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
74863 {
74864 ioport_unmap(addr);
74865 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
74866- devm_ioport_map_match, (void *)addr));
74867+ devm_ioport_map_match, (void __force *)addr));
74868 }
74869 EXPORT_SYMBOL(devm_ioport_unmap);
74870
74871diff --git a/lib/dma-debug.c b/lib/dma-debug.c
74872index 084e879..0674448 100644
74873--- a/lib/dma-debug.c
74874+++ b/lib/dma-debug.c
74875@@ -861,7 +861,7 @@ out:
74876
74877 static void check_for_stack(struct device *dev, void *addr)
74878 {
74879- if (object_is_on_stack(addr))
74880+ if (object_starts_on_stack(addr))
74881 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
74882 "stack [addr=%p]\n", addr);
74883 }
74884diff --git a/lib/idr.c b/lib/idr.c
74885index eda7ba3..915dfae 100644
74886--- a/lib/idr.c
74887+++ b/lib/idr.c
74888@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
74889 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
74890
74891 /* if already at the top layer, we need to grow */
74892- if (id >= 1 << (idp->layers * IDR_BITS)) {
74893+ if (id >= (1 << (idp->layers * IDR_BITS))) {
74894 *starting_id = id;
74895 return IDR_NEED_TO_GROW;
74896 }
74897diff --git a/lib/inflate.c b/lib/inflate.c
74898index d102559..4215f31 100644
74899--- a/lib/inflate.c
74900+++ b/lib/inflate.c
74901@@ -266,7 +266,7 @@ static void free(void *where)
74902 malloc_ptr = free_mem_ptr;
74903 }
74904 #else
74905-#define malloc(a) kmalloc(a, GFP_KERNEL)
74906+#define malloc(a) kmalloc((a), GFP_KERNEL)
74907 #define free(a) kfree(a)
74908 #endif
74909
74910diff --git a/lib/kobject.c b/lib/kobject.c
74911index b512b74..8115eb1 100644
74912--- a/lib/kobject.c
74913+++ b/lib/kobject.c
74914@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
74915 return ret;
74916 }
74917
74918-struct sysfs_ops kobj_sysfs_ops = {
74919+const struct sysfs_ops kobj_sysfs_ops = {
74920 .show = kobj_attr_show,
74921 .store = kobj_attr_store,
74922 };
74923@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
74924 * If the kset was not able to be created, NULL will be returned.
74925 */
74926 static struct kset *kset_create(const char *name,
74927- struct kset_uevent_ops *uevent_ops,
74928+ const struct kset_uevent_ops *uevent_ops,
74929 struct kobject *parent_kobj)
74930 {
74931 struct kset *kset;
74932@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
74933 * If the kset was not able to be created, NULL will be returned.
74934 */
74935 struct kset *kset_create_and_add(const char *name,
74936- struct kset_uevent_ops *uevent_ops,
74937+ const struct kset_uevent_ops *uevent_ops,
74938 struct kobject *parent_kobj)
74939 {
74940 struct kset *kset;
74941diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
74942index 507b821..0bf8ed0 100644
74943--- a/lib/kobject_uevent.c
74944+++ b/lib/kobject_uevent.c
74945@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
74946 const char *subsystem;
74947 struct kobject *top_kobj;
74948 struct kset *kset;
74949- struct kset_uevent_ops *uevent_ops;
74950+ const struct kset_uevent_ops *uevent_ops;
74951 u64 seq;
74952 int i = 0;
74953 int retval = 0;
74954diff --git a/lib/kref.c b/lib/kref.c
74955index 9ecd6e8..12c94c1 100644
74956--- a/lib/kref.c
74957+++ b/lib/kref.c
74958@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
74959 */
74960 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
74961 {
74962- WARN_ON(release == NULL);
74963+ BUG_ON(release == NULL);
74964 WARN_ON(release == (void (*)(struct kref *))kfree);
74965
74966 if (atomic_dec_and_test(&kref->refcount)) {
74967diff --git a/lib/parser.c b/lib/parser.c
74968index b00d020..1b34325 100644
74969--- a/lib/parser.c
74970+++ b/lib/parser.c
74971@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
74972 char *buf;
74973 int ret;
74974
74975- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
74976+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
74977 if (!buf)
74978 return -ENOMEM;
74979 memcpy(buf, s->from, s->to - s->from);
74980diff --git a/lib/radix-tree.c b/lib/radix-tree.c
74981index 92cdd99..a8149d7 100644
74982--- a/lib/radix-tree.c
74983+++ b/lib/radix-tree.c
74984@@ -81,7 +81,7 @@ struct radix_tree_preload {
74985 int nr;
74986 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
74987 };
74988-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
74989+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
74990
74991 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
74992 {
74993diff --git a/lib/random32.c b/lib/random32.c
74994index 217d5c4..45aba8a 100644
74995--- a/lib/random32.c
74996+++ b/lib/random32.c
74997@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
74998 */
74999 static inline u32 __seed(u32 x, u32 m)
75000 {
75001- return (x < m) ? x + m : x;
75002+ return (x <= m) ? x + m + 1 : x;
75003 }
75004
75005 /**
75006diff --git a/lib/vsprintf.c b/lib/vsprintf.c
75007index 33bed5e..1477e46 100644
75008--- a/lib/vsprintf.c
75009+++ b/lib/vsprintf.c
75010@@ -16,6 +16,9 @@
75011 * - scnprintf and vscnprintf
75012 */
75013
75014+#ifdef CONFIG_GRKERNSEC_HIDESYM
75015+#define __INCLUDED_BY_HIDESYM 1
75016+#endif
75017 #include <stdarg.h>
75018 #include <linux/module.h>
75019 #include <linux/types.h>
75020@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
75021 return buf;
75022 }
75023
75024-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
75025+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
75026 {
75027 int len, i;
75028
75029 if ((unsigned long)s < PAGE_SIZE)
75030- s = "<NULL>";
75031+ s = "(null)";
75032
75033 len = strnlen(s, spec.precision);
75034
75035@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
75036 unsigned long value = (unsigned long) ptr;
75037 #ifdef CONFIG_KALLSYMS
75038 char sym[KSYM_SYMBOL_LEN];
75039- if (ext != 'f' && ext != 's')
75040+ if (ext != 'f' && ext != 's' && ext != 'a')
75041 sprint_symbol(sym, value);
75042 else
75043 kallsyms_lookup(value, NULL, NULL, NULL, sym);
75044@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
75045 * - 'f' For simple symbolic function names without offset
75046 * - 'S' For symbolic direct pointers with offset
75047 * - 's' For symbolic direct pointers without offset
75048+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
75049+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
75050 * - 'R' For a struct resource pointer, it prints the range of
75051 * addresses (not the name nor the flags)
75052 * - 'M' For a 6-byte MAC address, it prints the address in the
75053@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75054 struct printf_spec spec)
75055 {
75056 if (!ptr)
75057- return string(buf, end, "(null)", spec);
75058+ return string(buf, end, "(nil)", spec);
75059
75060 switch (*fmt) {
75061 case 'F':
75062@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75063 case 's':
75064 /* Fallthrough */
75065 case 'S':
75066+#ifdef CONFIG_GRKERNSEC_HIDESYM
75067+ break;
75068+#else
75069+ return symbol_string(buf, end, ptr, spec, *fmt);
75070+#endif
75071+ case 'a':
75072+ /* Fallthrough */
75073+ case 'A':
75074 return symbol_string(buf, end, ptr, spec, *fmt);
75075 case 'R':
75076 return resource_string(buf, end, ptr, spec);
75077@@ -1445,7 +1458,7 @@ do { \
75078 size_t len;
75079 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
75080 || (unsigned long)save_str < PAGE_SIZE)
75081- save_str = "<NULL>";
75082+ save_str = "(null)";
75083 len = strlen(save_str);
75084 if (str + len + 1 < end)
75085 memcpy(str, save_str, len + 1);
75086@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75087 typeof(type) value; \
75088 if (sizeof(type) == 8) { \
75089 args = PTR_ALIGN(args, sizeof(u32)); \
75090- *(u32 *)&value = *(u32 *)args; \
75091- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
75092+ *(u32 *)&value = *(const u32 *)args; \
75093+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
75094 } else { \
75095 args = PTR_ALIGN(args, sizeof(type)); \
75096- value = *(typeof(type) *)args; \
75097+ value = *(const typeof(type) *)args; \
75098 } \
75099 args += sizeof(type); \
75100 value; \
75101@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75102 const char *str_arg = args;
75103 size_t len = strlen(str_arg);
75104 args += len + 1;
75105- str = string(str, end, (char *)str_arg, spec);
75106+ str = string(str, end, str_arg, spec);
75107 break;
75108 }
75109
75110diff --git a/localversion-grsec b/localversion-grsec
75111new file mode 100644
75112index 0000000..7cd6065
75113--- /dev/null
75114+++ b/localversion-grsec
75115@@ -0,0 +1 @@
75116+-grsec
75117diff --git a/mm/Kconfig b/mm/Kconfig
75118index 2c19c0b..f3c3f83 100644
75119--- a/mm/Kconfig
75120+++ b/mm/Kconfig
75121@@ -228,7 +228,7 @@ config KSM
75122 config DEFAULT_MMAP_MIN_ADDR
75123 int "Low address space to protect from user allocation"
75124 depends on MMU
75125- default 4096
75126+ default 65536
75127 help
75128 This is the portion of low virtual memory which should be protected
75129 from userspace allocation. Keeping a user from writing to low pages
75130diff --git a/mm/backing-dev.c b/mm/backing-dev.c
75131index 67a33a5..094dcf1 100644
75132--- a/mm/backing-dev.c
75133+++ b/mm/backing-dev.c
75134@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
75135 list_add_tail_rcu(&wb->list, &bdi->wb_list);
75136 spin_unlock(&bdi->wb_lock);
75137
75138- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
75139+ tsk->flags |= PF_SWAPWRITE;
75140 set_freezable();
75141
75142 /*
75143@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
75144 * Add the default flusher task that gets created for any bdi
75145 * that has dirty data pending writeout
75146 */
75147-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75148+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75149 {
75150 if (!bdi_cap_writeback_dirty(bdi))
75151 return;
75152diff --git a/mm/filemap.c b/mm/filemap.c
75153index 9e0826e..4ee8f13 100644
75154--- a/mm/filemap.c
75155+++ b/mm/filemap.c
75156@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
75157 struct address_space *mapping = file->f_mapping;
75158
75159 if (!mapping->a_ops->readpage)
75160- return -ENOEXEC;
75161+ return -ENODEV;
75162 file_accessed(file);
75163 vma->vm_ops = &generic_file_vm_ops;
75164 vma->vm_flags |= VM_CAN_NONLINEAR;
75165@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
75166 *pos = i_size_read(inode);
75167
75168 if (limit != RLIM_INFINITY) {
75169+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
75170 if (*pos >= limit) {
75171 send_sig(SIGXFSZ, current, 0);
75172 return -EFBIG;
75173diff --git a/mm/fremap.c b/mm/fremap.c
75174index b6ec85a..a24ac22 100644
75175--- a/mm/fremap.c
75176+++ b/mm/fremap.c
75177@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75178 retry:
75179 vma = find_vma(mm, start);
75180
75181+#ifdef CONFIG_PAX_SEGMEXEC
75182+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
75183+ goto out;
75184+#endif
75185+
75186 /*
75187 * Make sure the vma is shared, that it supports prefaulting,
75188 * and that the remapped range is valid and fully within
75189@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75190 /*
75191 * drop PG_Mlocked flag for over-mapped range
75192 */
75193- unsigned int saved_flags = vma->vm_flags;
75194+ unsigned long saved_flags = vma->vm_flags;
75195 munlock_vma_pages_range(vma, start, start + size);
75196 vma->vm_flags = saved_flags;
75197 }
75198diff --git a/mm/highmem.c b/mm/highmem.c
75199index 9c1e627..5ca9447 100644
75200--- a/mm/highmem.c
75201+++ b/mm/highmem.c
75202@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
75203 * So no dangers, even with speculative execution.
75204 */
75205 page = pte_page(pkmap_page_table[i]);
75206+ pax_open_kernel();
75207 pte_clear(&init_mm, (unsigned long)page_address(page),
75208 &pkmap_page_table[i]);
75209-
75210+ pax_close_kernel();
75211 set_page_address(page, NULL);
75212 need_flush = 1;
75213 }
75214@@ -177,9 +178,11 @@ start:
75215 }
75216 }
75217 vaddr = PKMAP_ADDR(last_pkmap_nr);
75218+
75219+ pax_open_kernel();
75220 set_pte_at(&init_mm, vaddr,
75221 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
75222-
75223+ pax_close_kernel();
75224 pkmap_count[last_pkmap_nr] = 1;
75225 set_page_address(page, (void *)vaddr);
75226
75227diff --git a/mm/hugetlb.c b/mm/hugetlb.c
75228index 5e1e508..9f0ebad 100644
75229--- a/mm/hugetlb.c
75230+++ b/mm/hugetlb.c
75231@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
75232 return 1;
75233 }
75234
75235+#ifdef CONFIG_PAX_SEGMEXEC
75236+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
75237+{
75238+ struct mm_struct *mm = vma->vm_mm;
75239+ struct vm_area_struct *vma_m;
75240+ unsigned long address_m;
75241+ pte_t *ptep_m;
75242+
75243+ vma_m = pax_find_mirror_vma(vma);
75244+ if (!vma_m)
75245+ return;
75246+
75247+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75248+ address_m = address + SEGMEXEC_TASK_SIZE;
75249+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
75250+ get_page(page_m);
75251+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
75252+}
75253+#endif
75254+
75255 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
75256 unsigned long address, pte_t *ptep, pte_t pte,
75257 struct page *pagecache_page)
75258@@ -2004,6 +2024,11 @@ retry_avoidcopy:
75259 huge_ptep_clear_flush(vma, address, ptep);
75260 set_huge_pte_at(mm, address, ptep,
75261 make_huge_pte(vma, new_page, 1));
75262+
75263+#ifdef CONFIG_PAX_SEGMEXEC
75264+ pax_mirror_huge_pte(vma, address, new_page);
75265+#endif
75266+
75267 /* Make the old page be freed below */
75268 new_page = old_page;
75269 }
75270@@ -2135,6 +2160,10 @@ retry:
75271 && (vma->vm_flags & VM_SHARED)));
75272 set_huge_pte_at(mm, address, ptep, new_pte);
75273
75274+#ifdef CONFIG_PAX_SEGMEXEC
75275+ pax_mirror_huge_pte(vma, address, page);
75276+#endif
75277+
75278 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
75279 /* Optimization, do the COW without a second fault */
75280 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
75281@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75282 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
75283 struct hstate *h = hstate_vma(vma);
75284
75285+#ifdef CONFIG_PAX_SEGMEXEC
75286+ struct vm_area_struct *vma_m;
75287+
75288+ vma_m = pax_find_mirror_vma(vma);
75289+ if (vma_m) {
75290+ unsigned long address_m;
75291+
75292+ if (vma->vm_start > vma_m->vm_start) {
75293+ address_m = address;
75294+ address -= SEGMEXEC_TASK_SIZE;
75295+ vma = vma_m;
75296+ h = hstate_vma(vma);
75297+ } else
75298+ address_m = address + SEGMEXEC_TASK_SIZE;
75299+
75300+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
75301+ return VM_FAULT_OOM;
75302+ address_m &= HPAGE_MASK;
75303+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
75304+ }
75305+#endif
75306+
75307 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
75308 if (!ptep)
75309 return VM_FAULT_OOM;
75310diff --git a/mm/internal.h b/mm/internal.h
75311index f03e8e2..7354343 100644
75312--- a/mm/internal.h
75313+++ b/mm/internal.h
75314@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
75315 * in mm/page_alloc.c
75316 */
75317 extern void __free_pages_bootmem(struct page *page, unsigned int order);
75318+extern void free_compound_page(struct page *page);
75319 extern void prep_compound_page(struct page *page, unsigned long order);
75320
75321
75322diff --git a/mm/kmemleak.c b/mm/kmemleak.c
75323index c346660..b47382f 100644
75324--- a/mm/kmemleak.c
75325+++ b/mm/kmemleak.c
75326@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
75327
75328 for (i = 0; i < object->trace_len; i++) {
75329 void *ptr = (void *)object->trace[i];
75330- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
75331+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
75332 }
75333 }
75334
75335diff --git a/mm/maccess.c b/mm/maccess.c
75336index 9073695..1127f348 100644
75337--- a/mm/maccess.c
75338+++ b/mm/maccess.c
75339@@ -14,7 +14,7 @@
75340 * Safely read from address @src to the buffer at @dst. If a kernel fault
75341 * happens, handle that and return -EFAULT.
75342 */
75343-long probe_kernel_read(void *dst, void *src, size_t size)
75344+long probe_kernel_read(void *dst, const void *src, size_t size)
75345 {
75346 long ret;
75347 mm_segment_t old_fs = get_fs();
75348@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
75349 set_fs(KERNEL_DS);
75350 pagefault_disable();
75351 ret = __copy_from_user_inatomic(dst,
75352- (__force const void __user *)src, size);
75353+ (const void __force_user *)src, size);
75354 pagefault_enable();
75355 set_fs(old_fs);
75356
75357@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
75358 * Safely write to address @dst from the buffer at @src. If a kernel fault
75359 * happens, handle that and return -EFAULT.
75360 */
75361-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
75362+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
75363 {
75364 long ret;
75365 mm_segment_t old_fs = get_fs();
75366
75367 set_fs(KERNEL_DS);
75368 pagefault_disable();
75369- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
75370+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
75371 pagefault_enable();
75372 set_fs(old_fs);
75373
75374diff --git a/mm/madvise.c b/mm/madvise.c
75375index 35b1479..499f7d4 100644
75376--- a/mm/madvise.c
75377+++ b/mm/madvise.c
75378@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
75379 pgoff_t pgoff;
75380 unsigned long new_flags = vma->vm_flags;
75381
75382+#ifdef CONFIG_PAX_SEGMEXEC
75383+ struct vm_area_struct *vma_m;
75384+#endif
75385+
75386 switch (behavior) {
75387 case MADV_NORMAL:
75388 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
75389@@ -103,6 +107,13 @@ success:
75390 /*
75391 * vm_flags is protected by the mmap_sem held in write mode.
75392 */
75393+
75394+#ifdef CONFIG_PAX_SEGMEXEC
75395+ vma_m = pax_find_mirror_vma(vma);
75396+ if (vma_m)
75397+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
75398+#endif
75399+
75400 vma->vm_flags = new_flags;
75401
75402 out:
75403@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75404 struct vm_area_struct ** prev,
75405 unsigned long start, unsigned long end)
75406 {
75407+
75408+#ifdef CONFIG_PAX_SEGMEXEC
75409+ struct vm_area_struct *vma_m;
75410+#endif
75411+
75412 *prev = vma;
75413 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
75414 return -EINVAL;
75415@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75416 zap_page_range(vma, start, end - start, &details);
75417 } else
75418 zap_page_range(vma, start, end - start, NULL);
75419+
75420+#ifdef CONFIG_PAX_SEGMEXEC
75421+ vma_m = pax_find_mirror_vma(vma);
75422+ if (vma_m) {
75423+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
75424+ struct zap_details details = {
75425+ .nonlinear_vma = vma_m,
75426+ .last_index = ULONG_MAX,
75427+ };
75428+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
75429+ } else
75430+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
75431+ }
75432+#endif
75433+
75434 return 0;
75435 }
75436
75437@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
75438 if (end < start)
75439 goto out;
75440
75441+#ifdef CONFIG_PAX_SEGMEXEC
75442+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
75443+ if (end > SEGMEXEC_TASK_SIZE)
75444+ goto out;
75445+ } else
75446+#endif
75447+
75448+ if (end > TASK_SIZE)
75449+ goto out;
75450+
75451 error = 0;
75452 if (end == start)
75453 goto out;
75454diff --git a/mm/memory-failure.c b/mm/memory-failure.c
75455index 8aeba53..b4a4198 100644
75456--- a/mm/memory-failure.c
75457+++ b/mm/memory-failure.c
75458@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
75459
75460 int sysctl_memory_failure_recovery __read_mostly = 1;
75461
75462-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75463+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75464
75465 /*
75466 * Send all the processes who have the page mapped an ``action optional''
75467@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
75468 si.si_signo = SIGBUS;
75469 si.si_errno = 0;
75470 si.si_code = BUS_MCEERR_AO;
75471- si.si_addr = (void *)addr;
75472+ si.si_addr = (void __user *)addr;
75473 #ifdef __ARCH_SI_TRAPNO
75474 si.si_trapno = trapno;
75475 #endif
75476@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
75477 return 0;
75478 }
75479
75480- atomic_long_add(1, &mce_bad_pages);
75481+ atomic_long_add_unchecked(1, &mce_bad_pages);
75482
75483 /*
75484 * We need/can do nothing about count=0 pages.
75485diff --git a/mm/memory.c b/mm/memory.c
75486index 6c836d3..48f3264 100644
75487--- a/mm/memory.c
75488+++ b/mm/memory.c
75489@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
75490 return;
75491
75492 pmd = pmd_offset(pud, start);
75493+
75494+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
75495 pud_clear(pud);
75496 pmd_free_tlb(tlb, pmd, start);
75497+#endif
75498+
75499 }
75500
75501 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75502@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75503 if (end - 1 > ceiling - 1)
75504 return;
75505
75506+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
75507 pud = pud_offset(pgd, start);
75508 pgd_clear(pgd);
75509 pud_free_tlb(tlb, pud, start);
75510+#endif
75511+
75512 }
75513
75514 /*
75515@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75516 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
75517 i = 0;
75518
75519- do {
75520+ while (nr_pages) {
75521 struct vm_area_struct *vma;
75522
75523- vma = find_extend_vma(mm, start);
75524+ vma = find_vma(mm, start);
75525 if (!vma && in_gate_area(tsk, start)) {
75526 unsigned long pg = start & PAGE_MASK;
75527 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
75528@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75529 continue;
75530 }
75531
75532- if (!vma ||
75533+ if (!vma || start < vma->vm_start ||
75534 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
75535 !(vm_flags & vma->vm_flags))
75536 return i ? : -EFAULT;
75537@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75538 start += PAGE_SIZE;
75539 nr_pages--;
75540 } while (nr_pages && start < vma->vm_end);
75541- } while (nr_pages);
75542+ }
75543 return i;
75544 }
75545
75546@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
75547 page_add_file_rmap(page);
75548 set_pte_at(mm, addr, pte, mk_pte(page, prot));
75549
75550+#ifdef CONFIG_PAX_SEGMEXEC
75551+ pax_mirror_file_pte(vma, addr, page, ptl);
75552+#endif
75553+
75554 retval = 0;
75555 pte_unmap_unlock(pte, ptl);
75556 return retval;
75557@@ -1560,10 +1571,22 @@ out:
75558 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
75559 struct page *page)
75560 {
75561+
75562+#ifdef CONFIG_PAX_SEGMEXEC
75563+ struct vm_area_struct *vma_m;
75564+#endif
75565+
75566 if (addr < vma->vm_start || addr >= vma->vm_end)
75567 return -EFAULT;
75568 if (!page_count(page))
75569 return -EINVAL;
75570+
75571+#ifdef CONFIG_PAX_SEGMEXEC
75572+ vma_m = pax_find_mirror_vma(vma);
75573+ if (vma_m)
75574+ vma_m->vm_flags |= VM_INSERTPAGE;
75575+#endif
75576+
75577 vma->vm_flags |= VM_INSERTPAGE;
75578 return insert_page(vma, addr, page, vma->vm_page_prot);
75579 }
75580@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
75581 unsigned long pfn)
75582 {
75583 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
75584+ BUG_ON(vma->vm_mirror);
75585
75586 if (addr < vma->vm_start || addr >= vma->vm_end)
75587 return -EFAULT;
75588@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
75589 copy_user_highpage(dst, src, va, vma);
75590 }
75591
75592+#ifdef CONFIG_PAX_SEGMEXEC
75593+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
75594+{
75595+ struct mm_struct *mm = vma->vm_mm;
75596+ spinlock_t *ptl;
75597+ pte_t *pte, entry;
75598+
75599+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
75600+ entry = *pte;
75601+ if (!pte_present(entry)) {
75602+ if (!pte_none(entry)) {
75603+ BUG_ON(pte_file(entry));
75604+ free_swap_and_cache(pte_to_swp_entry(entry));
75605+ pte_clear_not_present_full(mm, address, pte, 0);
75606+ }
75607+ } else {
75608+ struct page *page;
75609+
75610+ flush_cache_page(vma, address, pte_pfn(entry));
75611+ entry = ptep_clear_flush(vma, address, pte);
75612+ BUG_ON(pte_dirty(entry));
75613+ page = vm_normal_page(vma, address, entry);
75614+ if (page) {
75615+ update_hiwater_rss(mm);
75616+ if (PageAnon(page))
75617+ dec_mm_counter(mm, anon_rss);
75618+ else
75619+ dec_mm_counter(mm, file_rss);
75620+ page_remove_rmap(page);
75621+ page_cache_release(page);
75622+ }
75623+ }
75624+ pte_unmap_unlock(pte, ptl);
75625+}
75626+
75627+/* PaX: if vma is mirrored, synchronize the mirror's PTE
75628+ *
75629+ * the ptl of the lower mapped page is held on entry and is not released on exit
75630+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
75631+ */
75632+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75633+{
75634+ struct mm_struct *mm = vma->vm_mm;
75635+ unsigned long address_m;
75636+ spinlock_t *ptl_m;
75637+ struct vm_area_struct *vma_m;
75638+ pmd_t *pmd_m;
75639+ pte_t *pte_m, entry_m;
75640+
75641+ BUG_ON(!page_m || !PageAnon(page_m));
75642+
75643+ vma_m = pax_find_mirror_vma(vma);
75644+ if (!vma_m)
75645+ return;
75646+
75647+ BUG_ON(!PageLocked(page_m));
75648+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75649+ address_m = address + SEGMEXEC_TASK_SIZE;
75650+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75651+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75652+ ptl_m = pte_lockptr(mm, pmd_m);
75653+ if (ptl != ptl_m) {
75654+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75655+ if (!pte_none(*pte_m))
75656+ goto out;
75657+ }
75658+
75659+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75660+ page_cache_get(page_m);
75661+ page_add_anon_rmap(page_m, vma_m, address_m);
75662+ inc_mm_counter(mm, anon_rss);
75663+ set_pte_at(mm, address_m, pte_m, entry_m);
75664+ update_mmu_cache(vma_m, address_m, entry_m);
75665+out:
75666+ if (ptl != ptl_m)
75667+ spin_unlock(ptl_m);
75668+ pte_unmap_nested(pte_m);
75669+ unlock_page(page_m);
75670+}
75671+
75672+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75673+{
75674+ struct mm_struct *mm = vma->vm_mm;
75675+ unsigned long address_m;
75676+ spinlock_t *ptl_m;
75677+ struct vm_area_struct *vma_m;
75678+ pmd_t *pmd_m;
75679+ pte_t *pte_m, entry_m;
75680+
75681+ BUG_ON(!page_m || PageAnon(page_m));
75682+
75683+ vma_m = pax_find_mirror_vma(vma);
75684+ if (!vma_m)
75685+ return;
75686+
75687+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75688+ address_m = address + SEGMEXEC_TASK_SIZE;
75689+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75690+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75691+ ptl_m = pte_lockptr(mm, pmd_m);
75692+ if (ptl != ptl_m) {
75693+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75694+ if (!pte_none(*pte_m))
75695+ goto out;
75696+ }
75697+
75698+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75699+ page_cache_get(page_m);
75700+ page_add_file_rmap(page_m);
75701+ inc_mm_counter(mm, file_rss);
75702+ set_pte_at(mm, address_m, pte_m, entry_m);
75703+ update_mmu_cache(vma_m, address_m, entry_m);
75704+out:
75705+ if (ptl != ptl_m)
75706+ spin_unlock(ptl_m);
75707+ pte_unmap_nested(pte_m);
75708+}
75709+
75710+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
75711+{
75712+ struct mm_struct *mm = vma->vm_mm;
75713+ unsigned long address_m;
75714+ spinlock_t *ptl_m;
75715+ struct vm_area_struct *vma_m;
75716+ pmd_t *pmd_m;
75717+ pte_t *pte_m, entry_m;
75718+
75719+ vma_m = pax_find_mirror_vma(vma);
75720+ if (!vma_m)
75721+ return;
75722+
75723+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75724+ address_m = address + SEGMEXEC_TASK_SIZE;
75725+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75726+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75727+ ptl_m = pte_lockptr(mm, pmd_m);
75728+ if (ptl != ptl_m) {
75729+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75730+ if (!pte_none(*pte_m))
75731+ goto out;
75732+ }
75733+
75734+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
75735+ set_pte_at(mm, address_m, pte_m, entry_m);
75736+out:
75737+ if (ptl != ptl_m)
75738+ spin_unlock(ptl_m);
75739+ pte_unmap_nested(pte_m);
75740+}
75741+
75742+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
75743+{
75744+ struct page *page_m;
75745+ pte_t entry;
75746+
75747+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
75748+ goto out;
75749+
75750+ entry = *pte;
75751+ page_m = vm_normal_page(vma, address, entry);
75752+ if (!page_m)
75753+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
75754+ else if (PageAnon(page_m)) {
75755+ if (pax_find_mirror_vma(vma)) {
75756+ pte_unmap_unlock(pte, ptl);
75757+ lock_page(page_m);
75758+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
75759+ if (pte_same(entry, *pte))
75760+ pax_mirror_anon_pte(vma, address, page_m, ptl);
75761+ else
75762+ unlock_page(page_m);
75763+ }
75764+ } else
75765+ pax_mirror_file_pte(vma, address, page_m, ptl);
75766+
75767+out:
75768+ pte_unmap_unlock(pte, ptl);
75769+}
75770+#endif
75771+
75772 /*
75773 * This routine handles present pages, when users try to write
75774 * to a shared page. It is done by copying the page to a new address
75775@@ -2156,6 +2360,12 @@ gotten:
75776 */
75777 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
75778 if (likely(pte_same(*page_table, orig_pte))) {
75779+
75780+#ifdef CONFIG_PAX_SEGMEXEC
75781+ if (pax_find_mirror_vma(vma))
75782+ BUG_ON(!trylock_page(new_page));
75783+#endif
75784+
75785 if (old_page) {
75786 if (!PageAnon(old_page)) {
75787 dec_mm_counter(mm, file_rss);
75788@@ -2207,6 +2417,10 @@ gotten:
75789 page_remove_rmap(old_page);
75790 }
75791
75792+#ifdef CONFIG_PAX_SEGMEXEC
75793+ pax_mirror_anon_pte(vma, address, new_page, ptl);
75794+#endif
75795+
75796 /* Free the old page.. */
75797 new_page = old_page;
75798 ret |= VM_FAULT_WRITE;
75799@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
75800 swap_free(entry);
75801 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
75802 try_to_free_swap(page);
75803+
75804+#ifdef CONFIG_PAX_SEGMEXEC
75805+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
75806+#endif
75807+
75808 unlock_page(page);
75809
75810 if (flags & FAULT_FLAG_WRITE) {
75811@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
75812
75813 /* No need to invalidate - it was non-present before */
75814 update_mmu_cache(vma, address, pte);
75815+
75816+#ifdef CONFIG_PAX_SEGMEXEC
75817+ pax_mirror_anon_pte(vma, address, page, ptl);
75818+#endif
75819+
75820 unlock:
75821 pte_unmap_unlock(page_table, ptl);
75822 out:
75823@@ -2632,40 +2856,6 @@ out_release:
75824 }
75825
75826 /*
75827- * This is like a special single-page "expand_{down|up}wards()",
75828- * except we must first make sure that 'address{-|+}PAGE_SIZE'
75829- * doesn't hit another vma.
75830- */
75831-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
75832-{
75833- address &= PAGE_MASK;
75834- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
75835- struct vm_area_struct *prev = vma->vm_prev;
75836-
75837- /*
75838- * Is there a mapping abutting this one below?
75839- *
75840- * That's only ok if it's the same stack mapping
75841- * that has gotten split..
75842- */
75843- if (prev && prev->vm_end == address)
75844- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
75845-
75846- expand_stack(vma, address - PAGE_SIZE);
75847- }
75848- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
75849- struct vm_area_struct *next = vma->vm_next;
75850-
75851- /* As VM_GROWSDOWN but s/below/above/ */
75852- if (next && next->vm_start == address + PAGE_SIZE)
75853- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
75854-
75855- expand_upwards(vma, address + PAGE_SIZE);
75856- }
75857- return 0;
75858-}
75859-
75860-/*
75861 * We enter with non-exclusive mmap_sem (to exclude vma changes,
75862 * but allow concurrent faults), and pte mapped but not yet locked.
75863 * We return with mmap_sem still held, but pte unmapped and unlocked.
75864@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
75865 unsigned long address, pte_t *page_table, pmd_t *pmd,
75866 unsigned int flags)
75867 {
75868- struct page *page;
75869+ struct page *page = NULL;
75870 spinlock_t *ptl;
75871 pte_t entry;
75872
75873- pte_unmap(page_table);
75874-
75875- /* Check if we need to add a guard page to the stack */
75876- if (check_stack_guard_page(vma, address) < 0)
75877- return VM_FAULT_SIGBUS;
75878-
75879- /* Use the zero-page for reads */
75880 if (!(flags & FAULT_FLAG_WRITE)) {
75881 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
75882 vma->vm_page_prot));
75883- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
75884+ ptl = pte_lockptr(mm, pmd);
75885+ spin_lock(ptl);
75886 if (!pte_none(*page_table))
75887 goto unlock;
75888 goto setpte;
75889 }
75890
75891 /* Allocate our own private page. */
75892+ pte_unmap(page_table);
75893+
75894 if (unlikely(anon_vma_prepare(vma)))
75895 goto oom;
75896 page = alloc_zeroed_user_highpage_movable(vma, address);
75897@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
75898 if (!pte_none(*page_table))
75899 goto release;
75900
75901+#ifdef CONFIG_PAX_SEGMEXEC
75902+ if (pax_find_mirror_vma(vma))
75903+ BUG_ON(!trylock_page(page));
75904+#endif
75905+
75906 inc_mm_counter(mm, anon_rss);
75907 page_add_new_anon_rmap(page, vma, address);
75908 setpte:
75909@@ -2720,6 +2911,12 @@ setpte:
75910
75911 /* No need to invalidate - it was non-present before */
75912 update_mmu_cache(vma, address, entry);
75913+
75914+#ifdef CONFIG_PAX_SEGMEXEC
75915+ if (page)
75916+ pax_mirror_anon_pte(vma, address, page, ptl);
75917+#endif
75918+
75919 unlock:
75920 pte_unmap_unlock(page_table, ptl);
75921 return 0;
75922@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75923 */
75924 /* Only go through if we didn't race with anybody else... */
75925 if (likely(pte_same(*page_table, orig_pte))) {
75926+
75927+#ifdef CONFIG_PAX_SEGMEXEC
75928+ if (anon && pax_find_mirror_vma(vma))
75929+ BUG_ON(!trylock_page(page));
75930+#endif
75931+
75932 flush_icache_page(vma, page);
75933 entry = mk_pte(page, vma->vm_page_prot);
75934 if (flags & FAULT_FLAG_WRITE)
75935@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75936
75937 /* no need to invalidate: a not-present page won't be cached */
75938 update_mmu_cache(vma, address, entry);
75939+
75940+#ifdef CONFIG_PAX_SEGMEXEC
75941+ if (anon)
75942+ pax_mirror_anon_pte(vma, address, page, ptl);
75943+ else
75944+ pax_mirror_file_pte(vma, address, page, ptl);
75945+#endif
75946+
75947 } else {
75948 if (charged)
75949 mem_cgroup_uncharge_page(page);
75950@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
75951 if (flags & FAULT_FLAG_WRITE)
75952 flush_tlb_page(vma, address);
75953 }
75954+
75955+#ifdef CONFIG_PAX_SEGMEXEC
75956+ pax_mirror_pte(vma, address, pte, pmd, ptl);
75957+ return 0;
75958+#endif
75959+
75960 unlock:
75961 pte_unmap_unlock(pte, ptl);
75962 return 0;
75963@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75964 pmd_t *pmd;
75965 pte_t *pte;
75966
75967+#ifdef CONFIG_PAX_SEGMEXEC
75968+ struct vm_area_struct *vma_m;
75969+#endif
75970+
75971 __set_current_state(TASK_RUNNING);
75972
75973 count_vm_event(PGFAULT);
75974@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75975 if (unlikely(is_vm_hugetlb_page(vma)))
75976 return hugetlb_fault(mm, vma, address, flags);
75977
75978+#ifdef CONFIG_PAX_SEGMEXEC
75979+ vma_m = pax_find_mirror_vma(vma);
75980+ if (vma_m) {
75981+ unsigned long address_m;
75982+ pgd_t *pgd_m;
75983+ pud_t *pud_m;
75984+ pmd_t *pmd_m;
75985+
75986+ if (vma->vm_start > vma_m->vm_start) {
75987+ address_m = address;
75988+ address -= SEGMEXEC_TASK_SIZE;
75989+ vma = vma_m;
75990+ } else
75991+ address_m = address + SEGMEXEC_TASK_SIZE;
75992+
75993+ pgd_m = pgd_offset(mm, address_m);
75994+ pud_m = pud_alloc(mm, pgd_m, address_m);
75995+ if (!pud_m)
75996+ return VM_FAULT_OOM;
75997+ pmd_m = pmd_alloc(mm, pud_m, address_m);
75998+ if (!pmd_m)
75999+ return VM_FAULT_OOM;
76000+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
76001+ return VM_FAULT_OOM;
76002+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
76003+ }
76004+#endif
76005+
76006 pgd = pgd_offset(mm, address);
76007 pud = pud_alloc(mm, pgd, address);
76008 if (!pud)
76009@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
76010 gate_vma.vm_start = FIXADDR_USER_START;
76011 gate_vma.vm_end = FIXADDR_USER_END;
76012 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
76013- gate_vma.vm_page_prot = __P101;
76014+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
76015 /*
76016 * Make sure the vDSO gets into every core dump.
76017 * Dumping its contents makes post-mortem fully interpretable later
76018diff --git a/mm/mempolicy.c b/mm/mempolicy.c
76019index 3c6e3e2..ad9871c 100644
76020--- a/mm/mempolicy.c
76021+++ b/mm/mempolicy.c
76022@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76023 struct vm_area_struct *next;
76024 int err;
76025
76026+#ifdef CONFIG_PAX_SEGMEXEC
76027+ struct vm_area_struct *vma_m;
76028+#endif
76029+
76030 err = 0;
76031 for (; vma && vma->vm_start < end; vma = next) {
76032 next = vma->vm_next;
76033@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76034 err = policy_vma(vma, new);
76035 if (err)
76036 break;
76037+
76038+#ifdef CONFIG_PAX_SEGMEXEC
76039+ vma_m = pax_find_mirror_vma(vma);
76040+ if (vma_m) {
76041+ err = policy_vma(vma_m, new);
76042+ if (err)
76043+ break;
76044+ }
76045+#endif
76046+
76047 }
76048 return err;
76049 }
76050@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
76051
76052 if (end < start)
76053 return -EINVAL;
76054+
76055+#ifdef CONFIG_PAX_SEGMEXEC
76056+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76057+ if (end > SEGMEXEC_TASK_SIZE)
76058+ return -EINVAL;
76059+ } else
76060+#endif
76061+
76062+ if (end > TASK_SIZE)
76063+ return -EINVAL;
76064+
76065 if (end == start)
76066 return 0;
76067
76068@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76069 if (!mm)
76070 return -EINVAL;
76071
76072+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76073+ if (mm != current->mm &&
76074+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76075+ err = -EPERM;
76076+ goto out;
76077+ }
76078+#endif
76079+
76080 /*
76081 * Check if this process has the right to modify the specified
76082 * process. The right exists if the process has administrative
76083@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76084 rcu_read_lock();
76085 tcred = __task_cred(task);
76086 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76087- cred->uid != tcred->suid && cred->uid != tcred->uid &&
76088- !capable(CAP_SYS_NICE)) {
76089+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76090 rcu_read_unlock();
76091 err = -EPERM;
76092 goto out;
76093@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, void *v)
76094
76095 if (file) {
76096 seq_printf(m, " file=");
76097- seq_path(m, &file->f_path, "\n\t= ");
76098+ seq_path(m, &file->f_path, "\n\t\\= ");
76099 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
76100 seq_printf(m, " heap");
76101 } else if (vma->vm_start <= mm->start_stack &&
76102diff --git a/mm/migrate.c b/mm/migrate.c
76103index aaca868..2ebecdc 100644
76104--- a/mm/migrate.c
76105+++ b/mm/migrate.c
76106@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
76107 unsigned long chunk_start;
76108 int err;
76109
76110+ pax_track_stack();
76111+
76112 task_nodes = cpuset_mems_allowed(task);
76113
76114 err = -ENOMEM;
76115@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76116 if (!mm)
76117 return -EINVAL;
76118
76119+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76120+ if (mm != current->mm &&
76121+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76122+ err = -EPERM;
76123+ goto out;
76124+ }
76125+#endif
76126+
76127 /*
76128 * Check if this process has the right to modify the specified
76129 * process. The right exists if the process has administrative
76130@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76131 rcu_read_lock();
76132 tcred = __task_cred(task);
76133 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76134- cred->uid != tcred->suid && cred->uid != tcred->uid &&
76135- !capable(CAP_SYS_NICE)) {
76136+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76137 rcu_read_unlock();
76138 err = -EPERM;
76139 goto out;
76140diff --git a/mm/mlock.c b/mm/mlock.c
76141index 2d846cf..98134d2 100644
76142--- a/mm/mlock.c
76143+++ b/mm/mlock.c
76144@@ -13,6 +13,7 @@
76145 #include <linux/pagemap.h>
76146 #include <linux/mempolicy.h>
76147 #include <linux/syscalls.h>
76148+#include <linux/security.h>
76149 #include <linux/sched.h>
76150 #include <linux/module.h>
76151 #include <linux/rmap.h>
76152@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
76153 }
76154 }
76155
76156-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
76157-{
76158- return (vma->vm_flags & VM_GROWSDOWN) &&
76159- (vma->vm_start == addr) &&
76160- !vma_stack_continue(vma->vm_prev, addr);
76161-}
76162-
76163 /**
76164 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
76165 * @vma: target vma
76166@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
76167 if (vma->vm_flags & VM_WRITE)
76168 gup_flags |= FOLL_WRITE;
76169
76170- /* We don't try to access the guard page of a stack vma */
76171- if (stack_guard_page(vma, start)) {
76172- addr += PAGE_SIZE;
76173- nr_pages--;
76174- }
76175-
76176 while (nr_pages > 0) {
76177 int i;
76178
76179@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
76180 {
76181 unsigned long nstart, end, tmp;
76182 struct vm_area_struct * vma, * prev;
76183- int error;
76184+ int error = -EINVAL;
76185
76186 len = PAGE_ALIGN(len);
76187 end = start + len;
76188@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
76189 return -EINVAL;
76190 if (end == start)
76191 return 0;
76192+ if (end > TASK_SIZE)
76193+ return -EINVAL;
76194+
76195 vma = find_vma_prev(current->mm, start, &prev);
76196 if (!vma || vma->vm_start > start)
76197 return -ENOMEM;
76198@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
76199 for (nstart = start ; ; ) {
76200 unsigned int newflags;
76201
76202+#ifdef CONFIG_PAX_SEGMEXEC
76203+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76204+ break;
76205+#endif
76206+
76207 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
76208
76209 newflags = vma->vm_flags | VM_LOCKED;
76210@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
76211 lock_limit >>= PAGE_SHIFT;
76212
76213 /* check against resource limits */
76214+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
76215 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
76216 error = do_mlock(start, len, 1);
76217 up_write(&current->mm->mmap_sem);
76218@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
76219 static int do_mlockall(int flags)
76220 {
76221 struct vm_area_struct * vma, * prev = NULL;
76222- unsigned int def_flags = 0;
76223
76224 if (flags & MCL_FUTURE)
76225- def_flags = VM_LOCKED;
76226- current->mm->def_flags = def_flags;
76227+ current->mm->def_flags |= VM_LOCKED;
76228+ else
76229+ current->mm->def_flags &= ~VM_LOCKED;
76230 if (flags == MCL_FUTURE)
76231 goto out;
76232
76233 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
76234- unsigned int newflags;
76235+ unsigned long newflags;
76236+
76237+#ifdef CONFIG_PAX_SEGMEXEC
76238+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76239+ break;
76240+#endif
76241
76242+ BUG_ON(vma->vm_end > TASK_SIZE);
76243 newflags = vma->vm_flags | VM_LOCKED;
76244 if (!(flags & MCL_CURRENT))
76245 newflags &= ~VM_LOCKED;
76246@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
76247 lock_limit >>= PAGE_SHIFT;
76248
76249 ret = -ENOMEM;
76250+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
76251 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
76252 capable(CAP_IPC_LOCK))
76253 ret = do_mlockall(flags);
76254diff --git a/mm/mmap.c b/mm/mmap.c
76255index 4b80cbf..c5ce1df 100644
76256--- a/mm/mmap.c
76257+++ b/mm/mmap.c
76258@@ -45,6 +45,16 @@
76259 #define arch_rebalance_pgtables(addr, len) (addr)
76260 #endif
76261
76262+static inline void verify_mm_writelocked(struct mm_struct *mm)
76263+{
76264+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
76265+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
76266+ up_read(&mm->mmap_sem);
76267+ BUG();
76268+ }
76269+#endif
76270+}
76271+
76272 static void unmap_region(struct mm_struct *mm,
76273 struct vm_area_struct *vma, struct vm_area_struct *prev,
76274 unsigned long start, unsigned long end);
76275@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
76276 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
76277 *
76278 */
76279-pgprot_t protection_map[16] = {
76280+pgprot_t protection_map[16] __read_only = {
76281 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
76282 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
76283 };
76284
76285 pgprot_t vm_get_page_prot(unsigned long vm_flags)
76286 {
76287- return __pgprot(pgprot_val(protection_map[vm_flags &
76288+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
76289 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
76290 pgprot_val(arch_vm_get_page_prot(vm_flags)));
76291+
76292+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76293+ if (!nx_enabled &&
76294+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
76295+ (vm_flags & (VM_READ | VM_WRITE)))
76296+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
76297+#endif
76298+
76299+ return prot;
76300 }
76301 EXPORT_SYMBOL(vm_get_page_prot);
76302
76303 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76304 int sysctl_overcommit_ratio = 50; /* default is 50% */
76305 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
76306+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
76307 struct percpu_counter vm_committed_as;
76308
76309 /*
76310@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
76311 struct vm_area_struct *next = vma->vm_next;
76312
76313 might_sleep();
76314+ BUG_ON(vma->vm_mirror);
76315 if (vma->vm_ops && vma->vm_ops->close)
76316 vma->vm_ops->close(vma);
76317 if (vma->vm_file) {
76318@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
76319 * not page aligned -Ram Gupta
76320 */
76321 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
76322+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
76323 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
76324 (mm->end_data - mm->start_data) > rlim)
76325 goto out;
76326@@ -704,6 +726,12 @@ static int
76327 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
76328 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76329 {
76330+
76331+#ifdef CONFIG_PAX_SEGMEXEC
76332+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
76333+ return 0;
76334+#endif
76335+
76336 if (is_mergeable_vma(vma, file, vm_flags) &&
76337 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76338 if (vma->vm_pgoff == vm_pgoff)
76339@@ -723,6 +751,12 @@ static int
76340 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76341 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76342 {
76343+
76344+#ifdef CONFIG_PAX_SEGMEXEC
76345+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
76346+ return 0;
76347+#endif
76348+
76349 if (is_mergeable_vma(vma, file, vm_flags) &&
76350 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76351 pgoff_t vm_pglen;
76352@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76353 struct vm_area_struct *vma_merge(struct mm_struct *mm,
76354 struct vm_area_struct *prev, unsigned long addr,
76355 unsigned long end, unsigned long vm_flags,
76356- struct anon_vma *anon_vma, struct file *file,
76357+ struct anon_vma *anon_vma, struct file *file,
76358 pgoff_t pgoff, struct mempolicy *policy)
76359 {
76360 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
76361 struct vm_area_struct *area, *next;
76362
76363+#ifdef CONFIG_PAX_SEGMEXEC
76364+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
76365+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
76366+
76367+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
76368+#endif
76369+
76370 /*
76371 * We later require that vma->vm_flags == vm_flags,
76372 * so this tests vma->vm_flags & VM_SPECIAL, too.
76373@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76374 if (next && next->vm_end == end) /* cases 6, 7, 8 */
76375 next = next->vm_next;
76376
76377+#ifdef CONFIG_PAX_SEGMEXEC
76378+ if (prev)
76379+ prev_m = pax_find_mirror_vma(prev);
76380+ if (area)
76381+ area_m = pax_find_mirror_vma(area);
76382+ if (next)
76383+ next_m = pax_find_mirror_vma(next);
76384+#endif
76385+
76386 /*
76387 * Can it merge with the predecessor?
76388 */
76389@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76390 /* cases 1, 6 */
76391 vma_adjust(prev, prev->vm_start,
76392 next->vm_end, prev->vm_pgoff, NULL);
76393- } else /* cases 2, 5, 7 */
76394+
76395+#ifdef CONFIG_PAX_SEGMEXEC
76396+ if (prev_m)
76397+ vma_adjust(prev_m, prev_m->vm_start,
76398+ next_m->vm_end, prev_m->vm_pgoff, NULL);
76399+#endif
76400+
76401+ } else { /* cases 2, 5, 7 */
76402 vma_adjust(prev, prev->vm_start,
76403 end, prev->vm_pgoff, NULL);
76404+
76405+#ifdef CONFIG_PAX_SEGMEXEC
76406+ if (prev_m)
76407+ vma_adjust(prev_m, prev_m->vm_start,
76408+ end_m, prev_m->vm_pgoff, NULL);
76409+#endif
76410+
76411+ }
76412 return prev;
76413 }
76414
76415@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76416 mpol_equal(policy, vma_policy(next)) &&
76417 can_vma_merge_before(next, vm_flags,
76418 anon_vma, file, pgoff+pglen)) {
76419- if (prev && addr < prev->vm_end) /* case 4 */
76420+ if (prev && addr < prev->vm_end) { /* case 4 */
76421 vma_adjust(prev, prev->vm_start,
76422 addr, prev->vm_pgoff, NULL);
76423- else /* cases 3, 8 */
76424+
76425+#ifdef CONFIG_PAX_SEGMEXEC
76426+ if (prev_m)
76427+ vma_adjust(prev_m, prev_m->vm_start,
76428+ addr_m, prev_m->vm_pgoff, NULL);
76429+#endif
76430+
76431+ } else { /* cases 3, 8 */
76432 vma_adjust(area, addr, next->vm_end,
76433 next->vm_pgoff - pglen, NULL);
76434+
76435+#ifdef CONFIG_PAX_SEGMEXEC
76436+ if (area_m)
76437+ vma_adjust(area_m, addr_m, next_m->vm_end,
76438+ next_m->vm_pgoff - pglen, NULL);
76439+#endif
76440+
76441+ }
76442 return area;
76443 }
76444
76445@@ -898,14 +978,11 @@ none:
76446 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
76447 struct file *file, long pages)
76448 {
76449- const unsigned long stack_flags
76450- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
76451-
76452 if (file) {
76453 mm->shared_vm += pages;
76454 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
76455 mm->exec_vm += pages;
76456- } else if (flags & stack_flags)
76457+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
76458 mm->stack_vm += pages;
76459 if (flags & (VM_RESERVED|VM_IO))
76460 mm->reserved_vm += pages;
76461@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76462 * (the exception is when the underlying filesystem is noexec
76463 * mounted, in which case we dont add PROT_EXEC.)
76464 */
76465- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76466+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76467 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
76468 prot |= PROT_EXEC;
76469
76470@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76471 /* Obtain the address to map to. we verify (or select) it and ensure
76472 * that it represents a valid section of the address space.
76473 */
76474- addr = get_unmapped_area(file, addr, len, pgoff, flags);
76475+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
76476 if (addr & ~PAGE_MASK)
76477 return addr;
76478
76479@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76480 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
76481 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
76482
76483+#ifdef CONFIG_PAX_MPROTECT
76484+ if (mm->pax_flags & MF_PAX_MPROTECT) {
76485+#ifndef CONFIG_PAX_MPROTECT_COMPAT
76486+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
76487+ gr_log_rwxmmap(file);
76488+
76489+#ifdef CONFIG_PAX_EMUPLT
76490+ vm_flags &= ~VM_EXEC;
76491+#else
76492+ return -EPERM;
76493+#endif
76494+
76495+ }
76496+
76497+ if (!(vm_flags & VM_EXEC))
76498+ vm_flags &= ~VM_MAYEXEC;
76499+#else
76500+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76501+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76502+#endif
76503+ else
76504+ vm_flags &= ~VM_MAYWRITE;
76505+ }
76506+#endif
76507+
76508+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76509+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
76510+ vm_flags &= ~VM_PAGEEXEC;
76511+#endif
76512+
76513 if (flags & MAP_LOCKED)
76514 if (!can_do_mlock())
76515 return -EPERM;
76516@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76517 locked += mm->locked_vm;
76518 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
76519 lock_limit >>= PAGE_SHIFT;
76520+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
76521 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
76522 return -EAGAIN;
76523 }
76524@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76525 if (error)
76526 return error;
76527
76528+ if (!gr_acl_handle_mmap(file, prot))
76529+ return -EACCES;
76530+
76531 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
76532 }
76533 EXPORT_SYMBOL(do_mmap_pgoff);
76534@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
76535 */
76536 int vma_wants_writenotify(struct vm_area_struct *vma)
76537 {
76538- unsigned int vm_flags = vma->vm_flags;
76539+ unsigned long vm_flags = vma->vm_flags;
76540
76541 /* If it was private or non-writable, the write bit is already clear */
76542- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
76543+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
76544 return 0;
76545
76546 /* The backer wishes to know when pages are first written to? */
76547@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
76548 unsigned long charged = 0;
76549 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
76550
76551+#ifdef CONFIG_PAX_SEGMEXEC
76552+ struct vm_area_struct *vma_m = NULL;
76553+#endif
76554+
76555+ /*
76556+ * mm->mmap_sem is required to protect against another thread
76557+ * changing the mappings in case we sleep.
76558+ */
76559+ verify_mm_writelocked(mm);
76560+
76561 /* Clear old maps */
76562 error = -ENOMEM;
76563-munmap_back:
76564 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
76565 if (vma && vma->vm_start < addr + len) {
76566 if (do_munmap(mm, addr, len))
76567 return -ENOMEM;
76568- goto munmap_back;
76569+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
76570+ BUG_ON(vma && vma->vm_start < addr + len);
76571 }
76572
76573 /* Check against address space limit. */
76574@@ -1173,6 +1294,16 @@ munmap_back:
76575 goto unacct_error;
76576 }
76577
76578+#ifdef CONFIG_PAX_SEGMEXEC
76579+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
76580+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76581+ if (!vma_m) {
76582+ error = -ENOMEM;
76583+ goto free_vma;
76584+ }
76585+ }
76586+#endif
76587+
76588 vma->vm_mm = mm;
76589 vma->vm_start = addr;
76590 vma->vm_end = addr + len;
76591@@ -1195,6 +1326,19 @@ munmap_back:
76592 error = file->f_op->mmap(file, vma);
76593 if (error)
76594 goto unmap_and_free_vma;
76595+
76596+#ifdef CONFIG_PAX_SEGMEXEC
76597+ if (vma_m && (vm_flags & VM_EXECUTABLE))
76598+ added_exe_file_vma(mm);
76599+#endif
76600+
76601+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76602+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
76603+ vma->vm_flags |= VM_PAGEEXEC;
76604+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76605+ }
76606+#endif
76607+
76608 if (vm_flags & VM_EXECUTABLE)
76609 added_exe_file_vma(mm);
76610
76611@@ -1218,6 +1362,11 @@ munmap_back:
76612 vma_link(mm, vma, prev, rb_link, rb_parent);
76613 file = vma->vm_file;
76614
76615+#ifdef CONFIG_PAX_SEGMEXEC
76616+ if (vma_m)
76617+ pax_mirror_vma(vma_m, vma);
76618+#endif
76619+
76620 /* Once vma denies write, undo our temporary denial count */
76621 if (correct_wcount)
76622 atomic_inc(&inode->i_writecount);
76623@@ -1226,6 +1375,7 @@ out:
76624
76625 mm->total_vm += len >> PAGE_SHIFT;
76626 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
76627+ track_exec_limit(mm, addr, addr + len, vm_flags);
76628 if (vm_flags & VM_LOCKED) {
76629 /*
76630 * makes pages present; downgrades, drops, reacquires mmap_sem
76631@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
76632 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
76633 charged = 0;
76634 free_vma:
76635+
76636+#ifdef CONFIG_PAX_SEGMEXEC
76637+ if (vma_m)
76638+ kmem_cache_free(vm_area_cachep, vma_m);
76639+#endif
76640+
76641 kmem_cache_free(vm_area_cachep, vma);
76642 unacct_error:
76643 if (charged)
76644@@ -1255,6 +1411,44 @@ unacct_error:
76645 return error;
76646 }
76647
76648+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
76649+{
76650+ if (!vma) {
76651+#ifdef CONFIG_STACK_GROWSUP
76652+ if (addr > sysctl_heap_stack_gap)
76653+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
76654+ else
76655+ vma = find_vma(current->mm, 0);
76656+ if (vma && (vma->vm_flags & VM_GROWSUP))
76657+ return false;
76658+#endif
76659+ return true;
76660+ }
76661+
76662+ if (addr + len > vma->vm_start)
76663+ return false;
76664+
76665+ if (vma->vm_flags & VM_GROWSDOWN)
76666+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
76667+#ifdef CONFIG_STACK_GROWSUP
76668+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
76669+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
76670+#endif
76671+
76672+ return true;
76673+}
76674+
76675+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
76676+{
76677+ if (vma->vm_start < len)
76678+ return -ENOMEM;
76679+ if (!(vma->vm_flags & VM_GROWSDOWN))
76680+ return vma->vm_start - len;
76681+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
76682+ return vma->vm_start - len - sysctl_heap_stack_gap;
76683+ return -ENOMEM;
76684+}
76685+
76686 /* Get an address range which is currently unmapped.
76687 * For shmat() with addr=0.
76688 *
76689@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
76690 if (flags & MAP_FIXED)
76691 return addr;
76692
76693+#ifdef CONFIG_PAX_RANDMMAP
76694+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
76695+#endif
76696+
76697 if (addr) {
76698 addr = PAGE_ALIGN(addr);
76699- vma = find_vma(mm, addr);
76700- if (TASK_SIZE - len >= addr &&
76701- (!vma || addr + len <= vma->vm_start))
76702- return addr;
76703+ if (TASK_SIZE - len >= addr) {
76704+ vma = find_vma(mm, addr);
76705+ if (check_heap_stack_gap(vma, addr, len))
76706+ return addr;
76707+ }
76708 }
76709 if (len > mm->cached_hole_size) {
76710- start_addr = addr = mm->free_area_cache;
76711+ start_addr = addr = mm->free_area_cache;
76712 } else {
76713- start_addr = addr = TASK_UNMAPPED_BASE;
76714- mm->cached_hole_size = 0;
76715+ start_addr = addr = mm->mmap_base;
76716+ mm->cached_hole_size = 0;
76717 }
76718
76719 full_search:
76720@@ -1303,34 +1502,40 @@ full_search:
76721 * Start a new search - just in case we missed
76722 * some holes.
76723 */
76724- if (start_addr != TASK_UNMAPPED_BASE) {
76725- addr = TASK_UNMAPPED_BASE;
76726- start_addr = addr;
76727+ if (start_addr != mm->mmap_base) {
76728+ start_addr = addr = mm->mmap_base;
76729 mm->cached_hole_size = 0;
76730 goto full_search;
76731 }
76732 return -ENOMEM;
76733 }
76734- if (!vma || addr + len <= vma->vm_start) {
76735- /*
76736- * Remember the place where we stopped the search:
76737- */
76738- mm->free_area_cache = addr + len;
76739- return addr;
76740- }
76741+ if (check_heap_stack_gap(vma, addr, len))
76742+ break;
76743 if (addr + mm->cached_hole_size < vma->vm_start)
76744 mm->cached_hole_size = vma->vm_start - addr;
76745 addr = vma->vm_end;
76746 }
76747+
76748+ /*
76749+ * Remember the place where we stopped the search:
76750+ */
76751+ mm->free_area_cache = addr + len;
76752+ return addr;
76753 }
76754 #endif
76755
76756 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
76757 {
76758+
76759+#ifdef CONFIG_PAX_SEGMEXEC
76760+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
76761+ return;
76762+#endif
76763+
76764 /*
76765 * Is this a new hole at the lowest possible address?
76766 */
76767- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
76768+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
76769 mm->free_area_cache = addr;
76770 mm->cached_hole_size = ~0UL;
76771 }
76772@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76773 {
76774 struct vm_area_struct *vma;
76775 struct mm_struct *mm = current->mm;
76776- unsigned long addr = addr0;
76777+ unsigned long base = mm->mmap_base, addr = addr0;
76778
76779 /* requested length too big for entire address space */
76780 if (len > TASK_SIZE)
76781@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76782 if (flags & MAP_FIXED)
76783 return addr;
76784
76785+#ifdef CONFIG_PAX_RANDMMAP
76786+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
76787+#endif
76788+
76789 /* requesting a specific address */
76790 if (addr) {
76791 addr = PAGE_ALIGN(addr);
76792- vma = find_vma(mm, addr);
76793- if (TASK_SIZE - len >= addr &&
76794- (!vma || addr + len <= vma->vm_start))
76795- return addr;
76796+ if (TASK_SIZE - len >= addr) {
76797+ vma = find_vma(mm, addr);
76798+ if (check_heap_stack_gap(vma, addr, len))
76799+ return addr;
76800+ }
76801 }
76802
76803 /* check if free_area_cache is useful for us */
76804@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76805 /* make sure it can fit in the remaining address space */
76806 if (addr > len) {
76807 vma = find_vma(mm, addr-len);
76808- if (!vma || addr <= vma->vm_start)
76809+ if (check_heap_stack_gap(vma, addr - len, len))
76810 /* remember the address as a hint for next time */
76811 return (mm->free_area_cache = addr-len);
76812 }
76813@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76814 * return with success:
76815 */
76816 vma = find_vma(mm, addr);
76817- if (!vma || addr+len <= vma->vm_start)
76818+ if (check_heap_stack_gap(vma, addr, len))
76819 /* remember the address as a hint for next time */
76820 return (mm->free_area_cache = addr);
76821
76822@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76823 mm->cached_hole_size = vma->vm_start - addr;
76824
76825 /* try just below the current vma->vm_start */
76826- addr = vma->vm_start-len;
76827- } while (len < vma->vm_start);
76828+ addr = skip_heap_stack_gap(vma, len);
76829+ } while (!IS_ERR_VALUE(addr));
76830
76831 bottomup:
76832 /*
76833@@ -1414,13 +1624,21 @@ bottomup:
76834 * can happen with large stack limits and large mmap()
76835 * allocations.
76836 */
76837+ mm->mmap_base = TASK_UNMAPPED_BASE;
76838+
76839+#ifdef CONFIG_PAX_RANDMMAP
76840+ if (mm->pax_flags & MF_PAX_RANDMMAP)
76841+ mm->mmap_base += mm->delta_mmap;
76842+#endif
76843+
76844+ mm->free_area_cache = mm->mmap_base;
76845 mm->cached_hole_size = ~0UL;
76846- mm->free_area_cache = TASK_UNMAPPED_BASE;
76847 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
76848 /*
76849 * Restore the topdown base:
76850 */
76851- mm->free_area_cache = mm->mmap_base;
76852+ mm->mmap_base = base;
76853+ mm->free_area_cache = base;
76854 mm->cached_hole_size = ~0UL;
76855
76856 return addr;
76857@@ -1429,6 +1647,12 @@ bottomup:
76858
76859 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
76860 {
76861+
76862+#ifdef CONFIG_PAX_SEGMEXEC
76863+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
76864+ return;
76865+#endif
76866+
76867 /*
76868 * Is this a new hole at the highest possible address?
76869 */
76870@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
76871 mm->free_area_cache = addr;
76872
76873 /* dont allow allocations above current base */
76874- if (mm->free_area_cache > mm->mmap_base)
76875+ if (mm->free_area_cache > mm->mmap_base) {
76876 mm->free_area_cache = mm->mmap_base;
76877+ mm->cached_hole_size = ~0UL;
76878+ }
76879 }
76880
76881 unsigned long
76882@@ -1545,6 +1771,27 @@ out:
76883 return prev ? prev->vm_next : vma;
76884 }
76885
76886+#ifdef CONFIG_PAX_SEGMEXEC
76887+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
76888+{
76889+ struct vm_area_struct *vma_m;
76890+
76891+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
76892+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
76893+ BUG_ON(vma->vm_mirror);
76894+ return NULL;
76895+ }
76896+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
76897+ vma_m = vma->vm_mirror;
76898+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
76899+ BUG_ON(vma->vm_file != vma_m->vm_file);
76900+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
76901+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
76902+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
76903+ return vma_m;
76904+}
76905+#endif
76906+
76907 /*
76908 * Verify that the stack growth is acceptable and
76909 * update accounting. This is shared with both the
76910@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
76911 return -ENOMEM;
76912
76913 /* Stack limit test */
76914+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
76915 if (size > rlim[RLIMIT_STACK].rlim_cur)
76916 return -ENOMEM;
76917
76918@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
76919 unsigned long limit;
76920 locked = mm->locked_vm + grow;
76921 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
76922+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
76923 if (locked > limit && !capable(CAP_IPC_LOCK))
76924 return -ENOMEM;
76925 }
76926@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
76927 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
76928 * vma is the last one with address > vma->vm_end. Have to extend vma.
76929 */
76930+#ifndef CONFIG_IA64
76931+static
76932+#endif
76933 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
76934 {
76935 int error;
76936+ bool locknext;
76937
76938 if (!(vma->vm_flags & VM_GROWSUP))
76939 return -EFAULT;
76940
76941+ /* Also guard against wrapping around to address 0. */
76942+ if (address < PAGE_ALIGN(address+1))
76943+ address = PAGE_ALIGN(address+1);
76944+ else
76945+ return -ENOMEM;
76946+
76947 /*
76948 * We must make sure the anon_vma is allocated
76949 * so that the anon_vma locking is not a noop.
76950 */
76951 if (unlikely(anon_vma_prepare(vma)))
76952 return -ENOMEM;
76953+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
76954+ if (locknext && anon_vma_prepare(vma->vm_next))
76955+ return -ENOMEM;
76956 anon_vma_lock(vma);
76957+ if (locknext)
76958+ anon_vma_lock(vma->vm_next);
76959
76960 /*
76961 * vma->vm_start/vm_end cannot change under us because the caller
76962 * is required to hold the mmap_sem in read mode. We need the
76963- * anon_vma lock to serialize against concurrent expand_stacks.
76964- * Also guard against wrapping around to address 0.
76965+ * anon_vma locks to serialize against concurrent expand_stacks
76966+ * and expand_upwards.
76967 */
76968- if (address < PAGE_ALIGN(address+4))
76969- address = PAGE_ALIGN(address+4);
76970- else {
76971- anon_vma_unlock(vma);
76972- return -ENOMEM;
76973- }
76974 error = 0;
76975
76976 /* Somebody else might have raced and expanded it already */
76977- if (address > vma->vm_end) {
76978+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
76979+ error = -ENOMEM;
76980+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
76981 unsigned long size, grow;
76982
76983 size = address - vma->vm_start;
76984@@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
76985 vma->vm_end = address;
76986 }
76987 }
76988+ if (locknext)
76989+ anon_vma_unlock(vma->vm_next);
76990 anon_vma_unlock(vma);
76991 return error;
76992 }
76993@@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
76994 unsigned long address)
76995 {
76996 int error;
76997+ bool lockprev = false;
76998+ struct vm_area_struct *prev;
76999
77000 /*
77001 * We must make sure the anon_vma is allocated
77002@@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
77003 if (error)
77004 return error;
77005
77006+ prev = vma->vm_prev;
77007+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
77008+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
77009+#endif
77010+ if (lockprev && anon_vma_prepare(prev))
77011+ return -ENOMEM;
77012+ if (lockprev)
77013+ anon_vma_lock(prev);
77014+
77015 anon_vma_lock(vma);
77016
77017 /*
77018@@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
77019 */
77020
77021 /* Somebody else might have raced and expanded it already */
77022- if (address < vma->vm_start) {
77023+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
77024+ error = -ENOMEM;
77025+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
77026 unsigned long size, grow;
77027
77028+#ifdef CONFIG_PAX_SEGMEXEC
77029+ struct vm_area_struct *vma_m;
77030+
77031+ vma_m = pax_find_mirror_vma(vma);
77032+#endif
77033+
77034 size = vma->vm_end - address;
77035 grow = (vma->vm_start - address) >> PAGE_SHIFT;
77036
77037@@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
77038 if (!error) {
77039 vma->vm_start = address;
77040 vma->vm_pgoff -= grow;
77041+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
77042+
77043+#ifdef CONFIG_PAX_SEGMEXEC
77044+ if (vma_m) {
77045+ vma_m->vm_start -= grow << PAGE_SHIFT;
77046+ vma_m->vm_pgoff -= grow;
77047+ }
77048+#endif
77049+
77050+
77051 }
77052 }
77053 }
77054 anon_vma_unlock(vma);
77055+ if (lockprev)
77056+ anon_vma_unlock(prev);
77057 return error;
77058 }
77059
77060@@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
77061 do {
77062 long nrpages = vma_pages(vma);
77063
77064+#ifdef CONFIG_PAX_SEGMEXEC
77065+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
77066+ vma = remove_vma(vma);
77067+ continue;
77068+ }
77069+#endif
77070+
77071 mm->total_vm -= nrpages;
77072 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
77073 vma = remove_vma(vma);
77074@@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
77075 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
77076 vma->vm_prev = NULL;
77077 do {
77078+
77079+#ifdef CONFIG_PAX_SEGMEXEC
77080+ if (vma->vm_mirror) {
77081+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
77082+ vma->vm_mirror->vm_mirror = NULL;
77083+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
77084+ vma->vm_mirror = NULL;
77085+ }
77086+#endif
77087+
77088 rb_erase(&vma->vm_rb, &mm->mm_rb);
77089 mm->map_count--;
77090 tail_vma = vma;
77091@@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77092 struct mempolicy *pol;
77093 struct vm_area_struct *new;
77094
77095+#ifdef CONFIG_PAX_SEGMEXEC
77096+ struct vm_area_struct *vma_m, *new_m = NULL;
77097+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
77098+#endif
77099+
77100 if (is_vm_hugetlb_page(vma) && (addr &
77101 ~(huge_page_mask(hstate_vma(vma)))))
77102 return -EINVAL;
77103
77104+#ifdef CONFIG_PAX_SEGMEXEC
77105+ vma_m = pax_find_mirror_vma(vma);
77106+
77107+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77108+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
77109+ if (mm->map_count >= sysctl_max_map_count-1)
77110+ return -ENOMEM;
77111+ } else
77112+#endif
77113+
77114 if (mm->map_count >= sysctl_max_map_count)
77115 return -ENOMEM;
77116
77117@@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77118 if (!new)
77119 return -ENOMEM;
77120
77121+#ifdef CONFIG_PAX_SEGMEXEC
77122+ if (vma_m) {
77123+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
77124+ if (!new_m) {
77125+ kmem_cache_free(vm_area_cachep, new);
77126+ return -ENOMEM;
77127+ }
77128+ }
77129+#endif
77130+
77131 /* most fields are the same, copy all, and then fixup */
77132 *new = *vma;
77133
77134@@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77135 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
77136 }
77137
77138+#ifdef CONFIG_PAX_SEGMEXEC
77139+ if (vma_m) {
77140+ *new_m = *vma_m;
77141+ new_m->vm_mirror = new;
77142+ new->vm_mirror = new_m;
77143+
77144+ if (new_below)
77145+ new_m->vm_end = addr_m;
77146+ else {
77147+ new_m->vm_start = addr_m;
77148+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
77149+ }
77150+ }
77151+#endif
77152+
77153 pol = mpol_dup(vma_policy(vma));
77154 if (IS_ERR(pol)) {
77155+
77156+#ifdef CONFIG_PAX_SEGMEXEC
77157+ if (new_m)
77158+ kmem_cache_free(vm_area_cachep, new_m);
77159+#endif
77160+
77161 kmem_cache_free(vm_area_cachep, new);
77162 return PTR_ERR(pol);
77163 }
77164@@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77165 else
77166 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
77167
77168+#ifdef CONFIG_PAX_SEGMEXEC
77169+ if (vma_m) {
77170+ mpol_get(pol);
77171+ vma_set_policy(new_m, pol);
77172+
77173+ if (new_m->vm_file) {
77174+ get_file(new_m->vm_file);
77175+ if (vma_m->vm_flags & VM_EXECUTABLE)
77176+ added_exe_file_vma(mm);
77177+ }
77178+
77179+ if (new_m->vm_ops && new_m->vm_ops->open)
77180+ new_m->vm_ops->open(new_m);
77181+
77182+ if (new_below)
77183+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
77184+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
77185+ else
77186+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
77187+ }
77188+#endif
77189+
77190 return 0;
77191 }
77192
77193@@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77194 * work. This now handles partial unmappings.
77195 * Jeremy Fitzhardinge <jeremy@goop.org>
77196 */
77197+#ifdef CONFIG_PAX_SEGMEXEC
77198+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77199+{
77200+ int ret = __do_munmap(mm, start, len);
77201+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
77202+ return ret;
77203+
77204+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
77205+}
77206+
77207+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77208+#else
77209 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77210+#endif
77211 {
77212 unsigned long end;
77213 struct vm_area_struct *vma, *prev, *last;
77214
77215+ /*
77216+ * mm->mmap_sem is required to protect against another thread
77217+ * changing the mappings in case we sleep.
77218+ */
77219+ verify_mm_writelocked(mm);
77220+
77221 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
77222 return -EINVAL;
77223
77224@@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77225 /* Fix up all other VM information */
77226 remove_vma_list(mm, vma);
77227
77228+ track_exec_limit(mm, start, end, 0UL);
77229+
77230 return 0;
77231 }
77232
77233@@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
77234
77235 profile_munmap(addr);
77236
77237+#ifdef CONFIG_PAX_SEGMEXEC
77238+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
77239+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
77240+ return -EINVAL;
77241+#endif
77242+
77243 down_write(&mm->mmap_sem);
77244 ret = do_munmap(mm, addr, len);
77245 up_write(&mm->mmap_sem);
77246 return ret;
77247 }
77248
77249-static inline void verify_mm_writelocked(struct mm_struct *mm)
77250-{
77251-#ifdef CONFIG_DEBUG_VM
77252- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77253- WARN_ON(1);
77254- up_read(&mm->mmap_sem);
77255- }
77256-#endif
77257-}
77258-
77259 /*
77260 * this is really a simplified "do_mmap". it only handles
77261 * anonymous maps. eventually we may be able to do some
77262@@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77263 struct rb_node ** rb_link, * rb_parent;
77264 pgoff_t pgoff = addr >> PAGE_SHIFT;
77265 int error;
77266+ unsigned long charged;
77267
77268 len = PAGE_ALIGN(len);
77269 if (!len)
77270@@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77271
77272 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
77273
77274+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
77275+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
77276+ flags &= ~VM_EXEC;
77277+
77278+#ifdef CONFIG_PAX_MPROTECT
77279+ if (mm->pax_flags & MF_PAX_MPROTECT)
77280+ flags &= ~VM_MAYEXEC;
77281+#endif
77282+
77283+ }
77284+#endif
77285+
77286 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
77287 if (error & ~PAGE_MASK)
77288 return error;
77289
77290+ charged = len >> PAGE_SHIFT;
77291+
77292 /*
77293 * mlock MCL_FUTURE?
77294 */
77295 if (mm->def_flags & VM_LOCKED) {
77296 unsigned long locked, lock_limit;
77297- locked = len >> PAGE_SHIFT;
77298+ locked = charged;
77299 locked += mm->locked_vm;
77300 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77301 lock_limit >>= PAGE_SHIFT;
77302@@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77303 /*
77304 * Clear old maps. this also does some error checking for us
77305 */
77306- munmap_back:
77307 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77308 if (vma && vma->vm_start < addr + len) {
77309 if (do_munmap(mm, addr, len))
77310 return -ENOMEM;
77311- goto munmap_back;
77312+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77313+ BUG_ON(vma && vma->vm_start < addr + len);
77314 }
77315
77316 /* Check against address space limits *after* clearing old maps... */
77317- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
77318+ if (!may_expand_vm(mm, charged))
77319 return -ENOMEM;
77320
77321 if (mm->map_count > sysctl_max_map_count)
77322 return -ENOMEM;
77323
77324- if (security_vm_enough_memory(len >> PAGE_SHIFT))
77325+ if (security_vm_enough_memory(charged))
77326 return -ENOMEM;
77327
77328 /* Can we just expand an old private anonymous mapping? */
77329@@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77330 */
77331 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77332 if (!vma) {
77333- vm_unacct_memory(len >> PAGE_SHIFT);
77334+ vm_unacct_memory(charged);
77335 return -ENOMEM;
77336 }
77337
77338@@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77339 vma->vm_page_prot = vm_get_page_prot(flags);
77340 vma_link(mm, vma, prev, rb_link, rb_parent);
77341 out:
77342- mm->total_vm += len >> PAGE_SHIFT;
77343+ mm->total_vm += charged;
77344 if (flags & VM_LOCKED) {
77345 if (!mlock_vma_pages_range(vma, addr, addr + len))
77346- mm->locked_vm += (len >> PAGE_SHIFT);
77347+ mm->locked_vm += charged;
77348 }
77349+ track_exec_limit(mm, addr, addr + len, flags);
77350 return addr;
77351 }
77352
77353@@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
77354 * Walk the list again, actually closing and freeing it,
77355 * with preemption enabled, without holding any MM locks.
77356 */
77357- while (vma)
77358+ while (vma) {
77359+ vma->vm_mirror = NULL;
77360 vma = remove_vma(vma);
77361+ }
77362
77363 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
77364 }
77365@@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77366 struct vm_area_struct * __vma, * prev;
77367 struct rb_node ** rb_link, * rb_parent;
77368
77369+#ifdef CONFIG_PAX_SEGMEXEC
77370+ struct vm_area_struct *vma_m = NULL;
77371+#endif
77372+
77373 /*
77374 * The vm_pgoff of a purely anonymous vma should be irrelevant
77375 * until its first write fault, when page's anon_vma and index
77376@@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77377 if ((vma->vm_flags & VM_ACCOUNT) &&
77378 security_vm_enough_memory_mm(mm, vma_pages(vma)))
77379 return -ENOMEM;
77380+
77381+#ifdef CONFIG_PAX_SEGMEXEC
77382+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
77383+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77384+ if (!vma_m)
77385+ return -ENOMEM;
77386+ }
77387+#endif
77388+
77389 vma_link(mm, vma, prev, rb_link, rb_parent);
77390+
77391+#ifdef CONFIG_PAX_SEGMEXEC
77392+ if (vma_m)
77393+ pax_mirror_vma(vma_m, vma);
77394+#endif
77395+
77396 return 0;
77397 }
77398
77399@@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77400 struct rb_node **rb_link, *rb_parent;
77401 struct mempolicy *pol;
77402
77403+ BUG_ON(vma->vm_mirror);
77404+
77405 /*
77406 * If anonymous vma has not yet been faulted, update new pgoff
77407 * to match new location, to increase its chance of merging.
77408@@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77409 return new_vma;
77410 }
77411
77412+#ifdef CONFIG_PAX_SEGMEXEC
77413+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
77414+{
77415+ struct vm_area_struct *prev_m;
77416+ struct rb_node **rb_link_m, *rb_parent_m;
77417+ struct mempolicy *pol_m;
77418+
77419+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
77420+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
77421+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
77422+ *vma_m = *vma;
77423+ pol_m = vma_policy(vma_m);
77424+ mpol_get(pol_m);
77425+ vma_set_policy(vma_m, pol_m);
77426+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
77427+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
77428+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
77429+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
77430+ if (vma_m->vm_file)
77431+ get_file(vma_m->vm_file);
77432+ if (vma_m->vm_ops && vma_m->vm_ops->open)
77433+ vma_m->vm_ops->open(vma_m);
77434+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
77435+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
77436+ vma_m->vm_mirror = vma;
77437+ vma->vm_mirror = vma_m;
77438+}
77439+#endif
77440+
77441 /*
77442 * Return true if the calling process may expand its vm space by the passed
77443 * number of pages
77444@@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
77445 unsigned long lim;
77446
77447 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
77448-
77449+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
77450 if (cur + npages > lim)
77451 return 0;
77452 return 1;
77453@@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
77454 vma->vm_start = addr;
77455 vma->vm_end = addr + len;
77456
77457+#ifdef CONFIG_PAX_MPROTECT
77458+ if (mm->pax_flags & MF_PAX_MPROTECT) {
77459+#ifndef CONFIG_PAX_MPROTECT_COMPAT
77460+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
77461+ return -EPERM;
77462+ if (!(vm_flags & VM_EXEC))
77463+ vm_flags &= ~VM_MAYEXEC;
77464+#else
77465+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77466+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77467+#endif
77468+ else
77469+ vm_flags &= ~VM_MAYWRITE;
77470+ }
77471+#endif
77472+
77473 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
77474 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77475
77476diff --git a/mm/mprotect.c b/mm/mprotect.c
77477index 1737c7e..c7faeb4 100644
77478--- a/mm/mprotect.c
77479+++ b/mm/mprotect.c
77480@@ -24,10 +24,16 @@
77481 #include <linux/mmu_notifier.h>
77482 #include <linux/migrate.h>
77483 #include <linux/perf_event.h>
77484+
77485+#ifdef CONFIG_PAX_MPROTECT
77486+#include <linux/elf.h>
77487+#endif
77488+
77489 #include <asm/uaccess.h>
77490 #include <asm/pgtable.h>
77491 #include <asm/cacheflush.h>
77492 #include <asm/tlbflush.h>
77493+#include <asm/mmu_context.h>
77494
77495 #ifndef pgprot_modify
77496 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
77497@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
77498 flush_tlb_range(vma, start, end);
77499 }
77500
77501+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77502+/* called while holding the mmap semaphor for writing except stack expansion */
77503+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
77504+{
77505+ unsigned long oldlimit, newlimit = 0UL;
77506+
77507+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
77508+ return;
77509+
77510+ spin_lock(&mm->page_table_lock);
77511+ oldlimit = mm->context.user_cs_limit;
77512+ if ((prot & VM_EXEC) && oldlimit < end)
77513+ /* USER_CS limit moved up */
77514+ newlimit = end;
77515+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
77516+ /* USER_CS limit moved down */
77517+ newlimit = start;
77518+
77519+ if (newlimit) {
77520+ mm->context.user_cs_limit = newlimit;
77521+
77522+#ifdef CONFIG_SMP
77523+ wmb();
77524+ cpus_clear(mm->context.cpu_user_cs_mask);
77525+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
77526+#endif
77527+
77528+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
77529+ }
77530+ spin_unlock(&mm->page_table_lock);
77531+ if (newlimit == end) {
77532+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
77533+
77534+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
77535+ if (is_vm_hugetlb_page(vma))
77536+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
77537+ else
77538+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
77539+ }
77540+}
77541+#endif
77542+
77543 int
77544 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77545 unsigned long start, unsigned long end, unsigned long newflags)
77546@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77547 int error;
77548 int dirty_accountable = 0;
77549
77550+#ifdef CONFIG_PAX_SEGMEXEC
77551+ struct vm_area_struct *vma_m = NULL;
77552+ unsigned long start_m, end_m;
77553+
77554+ start_m = start + SEGMEXEC_TASK_SIZE;
77555+ end_m = end + SEGMEXEC_TASK_SIZE;
77556+#endif
77557+
77558 if (newflags == oldflags) {
77559 *pprev = vma;
77560 return 0;
77561 }
77562
77563+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
77564+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
77565+
77566+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
77567+ return -ENOMEM;
77568+
77569+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
77570+ return -ENOMEM;
77571+ }
77572+
77573 /*
77574 * If we make a private mapping writable we increase our commit;
77575 * but (without finer accounting) cannot reduce our commit if we
77576@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77577 }
77578 }
77579
77580+#ifdef CONFIG_PAX_SEGMEXEC
77581+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
77582+ if (start != vma->vm_start) {
77583+ error = split_vma(mm, vma, start, 1);
77584+ if (error)
77585+ goto fail;
77586+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
77587+ *pprev = (*pprev)->vm_next;
77588+ }
77589+
77590+ if (end != vma->vm_end) {
77591+ error = split_vma(mm, vma, end, 0);
77592+ if (error)
77593+ goto fail;
77594+ }
77595+
77596+ if (pax_find_mirror_vma(vma)) {
77597+ error = __do_munmap(mm, start_m, end_m - start_m);
77598+ if (error)
77599+ goto fail;
77600+ } else {
77601+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77602+ if (!vma_m) {
77603+ error = -ENOMEM;
77604+ goto fail;
77605+ }
77606+ vma->vm_flags = newflags;
77607+ pax_mirror_vma(vma_m, vma);
77608+ }
77609+ }
77610+#endif
77611+
77612 /*
77613 * First try to merge with previous and/or next vma.
77614 */
77615@@ -195,9 +293,21 @@ success:
77616 * vm_flags and vm_page_prot are protected by the mmap_sem
77617 * held in write mode.
77618 */
77619+
77620+#ifdef CONFIG_PAX_SEGMEXEC
77621+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
77622+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
77623+#endif
77624+
77625 vma->vm_flags = newflags;
77626+
77627+#ifdef CONFIG_PAX_MPROTECT
77628+ if (mm->binfmt && mm->binfmt->handle_mprotect)
77629+ mm->binfmt->handle_mprotect(vma, newflags);
77630+#endif
77631+
77632 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
77633- vm_get_page_prot(newflags));
77634+ vm_get_page_prot(vma->vm_flags));
77635
77636 if (vma_wants_writenotify(vma)) {
77637 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
77638@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77639 end = start + len;
77640 if (end <= start)
77641 return -ENOMEM;
77642+
77643+#ifdef CONFIG_PAX_SEGMEXEC
77644+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77645+ if (end > SEGMEXEC_TASK_SIZE)
77646+ return -EINVAL;
77647+ } else
77648+#endif
77649+
77650+ if (end > TASK_SIZE)
77651+ return -EINVAL;
77652+
77653 if (!arch_validate_prot(prot))
77654 return -EINVAL;
77655
77656@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77657 /*
77658 * Does the application expect PROT_READ to imply PROT_EXEC:
77659 */
77660- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77661+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77662 prot |= PROT_EXEC;
77663
77664 vm_flags = calc_vm_prot_bits(prot);
77665@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77666 if (start > vma->vm_start)
77667 prev = vma;
77668
77669+#ifdef CONFIG_PAX_MPROTECT
77670+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
77671+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
77672+#endif
77673+
77674 for (nstart = start ; ; ) {
77675 unsigned long newflags;
77676
77677@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77678
77679 /* newflags >> 4 shift VM_MAY% in place of VM_% */
77680 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
77681+ if (prot & (PROT_WRITE | PROT_EXEC))
77682+ gr_log_rwxmprotect(vma->vm_file);
77683+
77684+ error = -EACCES;
77685+ goto out;
77686+ }
77687+
77688+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
77689 error = -EACCES;
77690 goto out;
77691 }
77692@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77693 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
77694 if (error)
77695 goto out;
77696+
77697+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
77698+
77699 nstart = tmp;
77700
77701 if (nstart < prev->vm_end)
77702diff --git a/mm/mremap.c b/mm/mremap.c
77703index 3e98d79..1706cec 100644
77704--- a/mm/mremap.c
77705+++ b/mm/mremap.c
77706@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
77707 continue;
77708 pte = ptep_clear_flush(vma, old_addr, old_pte);
77709 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
77710+
77711+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77712+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
77713+ pte = pte_exprotect(pte);
77714+#endif
77715+
77716 set_pte_at(mm, new_addr, new_pte, pte);
77717 }
77718
77719@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
77720 if (is_vm_hugetlb_page(vma))
77721 goto Einval;
77722
77723+#ifdef CONFIG_PAX_SEGMEXEC
77724+ if (pax_find_mirror_vma(vma))
77725+ goto Einval;
77726+#endif
77727+
77728 /* We can't remap across vm area boundaries */
77729 if (old_len > vma->vm_end - addr)
77730 goto Efault;
77731@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
77732 unsigned long ret = -EINVAL;
77733 unsigned long charged = 0;
77734 unsigned long map_flags;
77735+ unsigned long pax_task_size = TASK_SIZE;
77736
77737 if (new_addr & ~PAGE_MASK)
77738 goto out;
77739
77740- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
77741+#ifdef CONFIG_PAX_SEGMEXEC
77742+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
77743+ pax_task_size = SEGMEXEC_TASK_SIZE;
77744+#endif
77745+
77746+ pax_task_size -= PAGE_SIZE;
77747+
77748+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
77749 goto out;
77750
77751 /* Check if the location we're moving into overlaps the
77752 * old location at all, and fail if it does.
77753 */
77754- if ((new_addr <= addr) && (new_addr+new_len) > addr)
77755- goto out;
77756-
77757- if ((addr <= new_addr) && (addr+old_len) > new_addr)
77758+ if (addr + old_len > new_addr && new_addr + new_len > addr)
77759 goto out;
77760
77761 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
77762@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
77763 struct vm_area_struct *vma;
77764 unsigned long ret = -EINVAL;
77765 unsigned long charged = 0;
77766+ unsigned long pax_task_size = TASK_SIZE;
77767
77768 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
77769 goto out;
77770@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
77771 if (!new_len)
77772 goto out;
77773
77774+#ifdef CONFIG_PAX_SEGMEXEC
77775+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
77776+ pax_task_size = SEGMEXEC_TASK_SIZE;
77777+#endif
77778+
77779+ pax_task_size -= PAGE_SIZE;
77780+
77781+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
77782+ old_len > pax_task_size || addr > pax_task_size-old_len)
77783+ goto out;
77784+
77785 if (flags & MREMAP_FIXED) {
77786 if (flags & MREMAP_MAYMOVE)
77787 ret = mremap_to(addr, old_len, new_addr, new_len);
77788@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
77789 addr + new_len);
77790 }
77791 ret = addr;
77792+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
77793 goto out;
77794 }
77795 }
77796@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
77797 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
77798 if (ret)
77799 goto out;
77800+
77801+ map_flags = vma->vm_flags;
77802 ret = move_vma(vma, addr, old_len, new_len, new_addr);
77803+ if (!(ret & ~PAGE_MASK)) {
77804+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
77805+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
77806+ }
77807 }
77808 out:
77809 if (ret & ~PAGE_MASK)
77810diff --git a/mm/nommu.c b/mm/nommu.c
77811index 406e8d4..53970d3 100644
77812--- a/mm/nommu.c
77813+++ b/mm/nommu.c
77814@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77815 int sysctl_overcommit_ratio = 50; /* default is 50% */
77816 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
77817 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
77818-int heap_stack_gap = 0;
77819
77820 atomic_long_t mmap_pages_allocated;
77821
77822@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
77823 EXPORT_SYMBOL(find_vma);
77824
77825 /*
77826- * find a VMA
77827- * - we don't extend stack VMAs under NOMMU conditions
77828- */
77829-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
77830-{
77831- return find_vma(mm, addr);
77832-}
77833-
77834-/*
77835 * expand a stack to a given address
77836 * - not supported under NOMMU conditions
77837 */
77838diff --git a/mm/page_alloc.c b/mm/page_alloc.c
77839index 3ecab7e..594a471 100644
77840--- a/mm/page_alloc.c
77841+++ b/mm/page_alloc.c
77842@@ -289,7 +289,7 @@ out:
77843 * This usage means that zero-order pages may not be compound.
77844 */
77845
77846-static void free_compound_page(struct page *page)
77847+void free_compound_page(struct page *page)
77848 {
77849 __free_pages_ok(page, compound_order(page));
77850 }
77851@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
77852 int bad = 0;
77853 int wasMlocked = __TestClearPageMlocked(page);
77854
77855+#ifdef CONFIG_PAX_MEMORY_SANITIZE
77856+ unsigned long index = 1UL << order;
77857+#endif
77858+
77859 kmemcheck_free_shadow(page, order);
77860
77861 for (i = 0 ; i < (1 << order) ; ++i)
77862@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
77863 debug_check_no_obj_freed(page_address(page),
77864 PAGE_SIZE << order);
77865 }
77866+
77867+#ifdef CONFIG_PAX_MEMORY_SANITIZE
77868+ for (; index; --index)
77869+ sanitize_highpage(page + index - 1);
77870+#endif
77871+
77872 arch_free_page(page, order);
77873 kernel_map_pages(page, 1 << order, 0);
77874
77875@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
77876 arch_alloc_page(page, order);
77877 kernel_map_pages(page, 1 << order, 1);
77878
77879+#ifndef CONFIG_PAX_MEMORY_SANITIZE
77880 if (gfp_flags & __GFP_ZERO)
77881 prep_zero_page(page, order, gfp_flags);
77882+#endif
77883
77884 if (order && (gfp_flags & __GFP_COMP))
77885 prep_compound_page(page, order);
77886@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
77887 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
77888 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
77889 }
77890+
77891+#ifdef CONFIG_PAX_MEMORY_SANITIZE
77892+ sanitize_highpage(page);
77893+#endif
77894+
77895 arch_free_page(page, 0);
77896 kernel_map_pages(page, 1, 0);
77897
77898@@ -2179,6 +2196,8 @@ void show_free_areas(void)
77899 int cpu;
77900 struct zone *zone;
77901
77902+ pax_track_stack();
77903+
77904 for_each_populated_zone(zone) {
77905 show_node(zone);
77906 printk("%s per-cpu:\n", zone->name);
77907@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
77908 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
77909 }
77910 #else
77911-static void inline setup_usemap(struct pglist_data *pgdat,
77912+static inline void setup_usemap(struct pglist_data *pgdat,
77913 struct zone *zone, unsigned long zonesize) {}
77914 #endif /* CONFIG_SPARSEMEM */
77915
77916diff --git a/mm/percpu.c b/mm/percpu.c
77917index 3bfd6e2..60404b9 100644
77918--- a/mm/percpu.c
77919+++ b/mm/percpu.c
77920@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu __read_mostly;
77921 static unsigned int pcpu_last_unit_cpu __read_mostly;
77922
77923 /* the address of the first chunk which starts with the kernel static area */
77924-void *pcpu_base_addr __read_mostly;
77925+void *pcpu_base_addr __read_only;
77926 EXPORT_SYMBOL_GPL(pcpu_base_addr);
77927
77928 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
77929diff --git a/mm/rmap.c b/mm/rmap.c
77930index dd43373..d848cd7 100644
77931--- a/mm/rmap.c
77932+++ b/mm/rmap.c
77933@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
77934 /* page_table_lock to protect against threads */
77935 spin_lock(&mm->page_table_lock);
77936 if (likely(!vma->anon_vma)) {
77937+
77938+#ifdef CONFIG_PAX_SEGMEXEC
77939+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
77940+
77941+ if (vma_m) {
77942+ BUG_ON(vma_m->anon_vma);
77943+ vma_m->anon_vma = anon_vma;
77944+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
77945+ }
77946+#endif
77947+
77948 vma->anon_vma = anon_vma;
77949 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
77950 allocated = NULL;
77951diff --git a/mm/shmem.c b/mm/shmem.c
77952index 3e0005b..1d659a8 100644
77953--- a/mm/shmem.c
77954+++ b/mm/shmem.c
77955@@ -31,7 +31,7 @@
77956 #include <linux/swap.h>
77957 #include <linux/ima.h>
77958
77959-static struct vfsmount *shm_mnt;
77960+struct vfsmount *shm_mnt;
77961
77962 #ifdef CONFIG_SHMEM
77963 /*
77964@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
77965 goto unlock;
77966 }
77967 entry = shmem_swp_entry(info, index, NULL);
77968+ if (!entry)
77969+ goto unlock;
77970 if (entry->val) {
77971 /*
77972 * The more uptodate page coming down from a stacked
77973@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
77974 struct vm_area_struct pvma;
77975 struct page *page;
77976
77977+ pax_track_stack();
77978+
77979 spol = mpol_cond_copy(&mpol,
77980 mpol_shared_policy_lookup(&info->policy, idx));
77981
77982@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
77983
77984 info = SHMEM_I(inode);
77985 inode->i_size = len-1;
77986- if (len <= (char *)inode - (char *)info) {
77987+ if (len <= (char *)inode - (char *)info && len <= 64) {
77988 /* do it inline */
77989 memcpy(info, symname, len);
77990 inode->i_op = &shmem_symlink_inline_operations;
77991@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
77992 int err = -ENOMEM;
77993
77994 /* Round up to L1_CACHE_BYTES to resist false sharing */
77995- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
77996- L1_CACHE_BYTES), GFP_KERNEL);
77997+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
77998 if (!sbinfo)
77999 return -ENOMEM;
78000
78001diff --git a/mm/slab.c b/mm/slab.c
78002index c8d466a..909e01e 100644
78003--- a/mm/slab.c
78004+++ b/mm/slab.c
78005@@ -174,7 +174,7 @@
78006
78007 /* Legal flag mask for kmem_cache_create(). */
78008 #if DEBUG
78009-# define CREATE_MASK (SLAB_RED_ZONE | \
78010+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
78011 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
78012 SLAB_CACHE_DMA | \
78013 SLAB_STORE_USER | \
78014@@ -182,7 +182,7 @@
78015 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78016 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
78017 #else
78018-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
78019+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
78020 SLAB_CACHE_DMA | \
78021 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
78022 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78023@@ -308,7 +308,7 @@ struct kmem_list3 {
78024 * Need this for bootstrapping a per node allocator.
78025 */
78026 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
78027-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
78028+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
78029 #define CACHE_CACHE 0
78030 #define SIZE_AC MAX_NUMNODES
78031 #define SIZE_L3 (2 * MAX_NUMNODES)
78032@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
78033 if ((x)->max_freeable < i) \
78034 (x)->max_freeable = i; \
78035 } while (0)
78036-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
78037-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
78038-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
78039-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
78040+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
78041+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
78042+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
78043+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
78044 #else
78045 #define STATS_INC_ACTIVE(x) do { } while (0)
78046 #define STATS_DEC_ACTIVE(x) do { } while (0)
78047@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
78048 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
78049 */
78050 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
78051- const struct slab *slab, void *obj)
78052+ const struct slab *slab, const void *obj)
78053 {
78054 u32 offset = (obj - slab->s_mem);
78055 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
78056@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
78057 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
78058 sizes[INDEX_AC].cs_size,
78059 ARCH_KMALLOC_MINALIGN,
78060- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78061+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78062 NULL);
78063
78064 if (INDEX_AC != INDEX_L3) {
78065@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
78066 kmem_cache_create(names[INDEX_L3].name,
78067 sizes[INDEX_L3].cs_size,
78068 ARCH_KMALLOC_MINALIGN,
78069- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78070+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78071 NULL);
78072 }
78073
78074@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
78075 sizes->cs_cachep = kmem_cache_create(names->name,
78076 sizes->cs_size,
78077 ARCH_KMALLOC_MINALIGN,
78078- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78079+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78080 NULL);
78081 }
78082 #ifdef CONFIG_ZONE_DMA
78083@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
78084 }
78085 /* cpu stats */
78086 {
78087- unsigned long allochit = atomic_read(&cachep->allochit);
78088- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
78089- unsigned long freehit = atomic_read(&cachep->freehit);
78090- unsigned long freemiss = atomic_read(&cachep->freemiss);
78091+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
78092+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
78093+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
78094+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
78095
78096 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
78097 allochit, allocmiss, freehit, freemiss);
78098@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
78099
78100 static int __init slab_proc_init(void)
78101 {
78102- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
78103+ mode_t gr_mode = S_IRUGO;
78104+
78105+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78106+ gr_mode = S_IRUSR;
78107+#endif
78108+
78109+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
78110 #ifdef CONFIG_DEBUG_SLAB_LEAK
78111- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
78112+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
78113 #endif
78114 return 0;
78115 }
78116 module_init(slab_proc_init);
78117 #endif
78118
78119+void check_object_size(const void *ptr, unsigned long n, bool to)
78120+{
78121+
78122+#ifdef CONFIG_PAX_USERCOPY
78123+ struct page *page;
78124+ struct kmem_cache *cachep = NULL;
78125+ struct slab *slabp;
78126+ unsigned int objnr;
78127+ unsigned long offset;
78128+ const char *type;
78129+
78130+ if (!n)
78131+ return;
78132+
78133+ type = "<null>";
78134+ if (ZERO_OR_NULL_PTR(ptr))
78135+ goto report;
78136+
78137+ if (!virt_addr_valid(ptr))
78138+ return;
78139+
78140+ page = virt_to_head_page(ptr);
78141+
78142+ type = "<process stack>";
78143+ if (!PageSlab(page)) {
78144+ if (object_is_on_stack(ptr, n) == -1)
78145+ goto report;
78146+ return;
78147+ }
78148+
78149+ cachep = page_get_cache(page);
78150+ type = cachep->name;
78151+ if (!(cachep->flags & SLAB_USERCOPY))
78152+ goto report;
78153+
78154+ slabp = page_get_slab(page);
78155+ objnr = obj_to_index(cachep, slabp, ptr);
78156+ BUG_ON(objnr >= cachep->num);
78157+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
78158+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
78159+ return;
78160+
78161+report:
78162+ pax_report_usercopy(ptr, n, to, type);
78163+#endif
78164+
78165+}
78166+EXPORT_SYMBOL(check_object_size);
78167+
78168 /**
78169 * ksize - get the actual amount of memory allocated for a given object
78170 * @objp: Pointer to the object
78171diff --git a/mm/slob.c b/mm/slob.c
78172index 837ebd6..4712174 100644
78173--- a/mm/slob.c
78174+++ b/mm/slob.c
78175@@ -29,7 +29,7 @@
78176 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
78177 * alloc_pages() directly, allocating compound pages so the page order
78178 * does not have to be separately tracked, and also stores the exact
78179- * allocation size in page->private so that it can be used to accurately
78180+ * allocation size in slob_page->size so that it can be used to accurately
78181 * provide ksize(). These objects are detected in kfree() because slob_page()
78182 * is false for them.
78183 *
78184@@ -58,6 +58,7 @@
78185 */
78186
78187 #include <linux/kernel.h>
78188+#include <linux/sched.h>
78189 #include <linux/slab.h>
78190 #include <linux/mm.h>
78191 #include <linux/swap.h> /* struct reclaim_state */
78192@@ -100,7 +101,8 @@ struct slob_page {
78193 unsigned long flags; /* mandatory */
78194 atomic_t _count; /* mandatory */
78195 slobidx_t units; /* free units left in page */
78196- unsigned long pad[2];
78197+ unsigned long pad[1];
78198+ unsigned long size; /* size when >=PAGE_SIZE */
78199 slob_t *free; /* first free slob_t in page */
78200 struct list_head list; /* linked list of free pages */
78201 };
78202@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
78203 */
78204 static inline int is_slob_page(struct slob_page *sp)
78205 {
78206- return PageSlab((struct page *)sp);
78207+ return PageSlab((struct page *)sp) && !sp->size;
78208 }
78209
78210 static inline void set_slob_page(struct slob_page *sp)
78211@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
78212
78213 static inline struct slob_page *slob_page(const void *addr)
78214 {
78215- return (struct slob_page *)virt_to_page(addr);
78216+ return (struct slob_page *)virt_to_head_page(addr);
78217 }
78218
78219 /*
78220@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
78221 /*
78222 * Return the size of a slob block.
78223 */
78224-static slobidx_t slob_units(slob_t *s)
78225+static slobidx_t slob_units(const slob_t *s)
78226 {
78227 if (s->units > 0)
78228 return s->units;
78229@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
78230 /*
78231 * Return the next free slob block pointer after this one.
78232 */
78233-static slob_t *slob_next(slob_t *s)
78234+static slob_t *slob_next(const slob_t *s)
78235 {
78236 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
78237 slobidx_t next;
78238@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
78239 /*
78240 * Returns true if s is the last free block in its page.
78241 */
78242-static int slob_last(slob_t *s)
78243+static int slob_last(const slob_t *s)
78244 {
78245 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
78246 }
78247@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
78248 if (!page)
78249 return NULL;
78250
78251+ set_slob_page(page);
78252 return page_address(page);
78253 }
78254
78255@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
78256 if (!b)
78257 return NULL;
78258 sp = slob_page(b);
78259- set_slob_page(sp);
78260
78261 spin_lock_irqsave(&slob_lock, flags);
78262 sp->units = SLOB_UNITS(PAGE_SIZE);
78263 sp->free = b;
78264+ sp->size = 0;
78265 INIT_LIST_HEAD(&sp->list);
78266 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
78267 set_slob_page_free(sp, slob_list);
78268@@ -475,10 +478,9 @@ out:
78269 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
78270 #endif
78271
78272-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78273+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
78274 {
78275- unsigned int *m;
78276- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78277+ slob_t *m;
78278 void *ret;
78279
78280 lockdep_trace_alloc(gfp);
78281@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78282
78283 if (!m)
78284 return NULL;
78285- *m = size;
78286+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
78287+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
78288+ m[0].units = size;
78289+ m[1].units = align;
78290 ret = (void *)m + align;
78291
78292 trace_kmalloc_node(_RET_IP_, ret,
78293@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78294
78295 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
78296 if (ret) {
78297- struct page *page;
78298- page = virt_to_page(ret);
78299- page->private = size;
78300+ struct slob_page *sp;
78301+ sp = slob_page(ret);
78302+ sp->size = size;
78303 }
78304
78305 trace_kmalloc_node(_RET_IP_, ret,
78306 size, PAGE_SIZE << order, gfp, node);
78307 }
78308
78309- kmemleak_alloc(ret, size, 1, gfp);
78310+ return ret;
78311+}
78312+
78313+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78314+{
78315+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78316+ void *ret = __kmalloc_node_align(size, gfp, node, align);
78317+
78318+ if (!ZERO_OR_NULL_PTR(ret))
78319+ kmemleak_alloc(ret, size, 1, gfp);
78320 return ret;
78321 }
78322 EXPORT_SYMBOL(__kmalloc_node);
78323@@ -528,13 +542,92 @@ void kfree(const void *block)
78324 sp = slob_page(block);
78325 if (is_slob_page(sp)) {
78326 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78327- unsigned int *m = (unsigned int *)(block - align);
78328- slob_free(m, *m + align);
78329- } else
78330+ slob_t *m = (slob_t *)(block - align);
78331+ slob_free(m, m[0].units + align);
78332+ } else {
78333+ clear_slob_page(sp);
78334+ free_slob_page(sp);
78335+ sp->size = 0;
78336 put_page(&sp->page);
78337+ }
78338 }
78339 EXPORT_SYMBOL(kfree);
78340
78341+void check_object_size(const void *ptr, unsigned long n, bool to)
78342+{
78343+
78344+#ifdef CONFIG_PAX_USERCOPY
78345+ struct slob_page *sp;
78346+ const slob_t *free;
78347+ const void *base;
78348+ unsigned long flags;
78349+ const char *type;
78350+
78351+ if (!n)
78352+ return;
78353+
78354+ type = "<null>";
78355+ if (ZERO_OR_NULL_PTR(ptr))
78356+ goto report;
78357+
78358+ if (!virt_addr_valid(ptr))
78359+ return;
78360+
78361+ type = "<process stack>";
78362+ sp = slob_page(ptr);
78363+ if (!PageSlab((struct page*)sp)) {
78364+ if (object_is_on_stack(ptr, n) == -1)
78365+ goto report;
78366+ return;
78367+ }
78368+
78369+ type = "<slob>";
78370+ if (sp->size) {
78371+ base = page_address(&sp->page);
78372+ if (base <= ptr && n <= sp->size - (ptr - base))
78373+ return;
78374+ goto report;
78375+ }
78376+
78377+ /* some tricky double walking to find the chunk */
78378+ spin_lock_irqsave(&slob_lock, flags);
78379+ base = (void *)((unsigned long)ptr & PAGE_MASK);
78380+ free = sp->free;
78381+
78382+ while (!slob_last(free) && (void *)free <= ptr) {
78383+ base = free + slob_units(free);
78384+ free = slob_next(free);
78385+ }
78386+
78387+ while (base < (void *)free) {
78388+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
78389+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
78390+ int offset;
78391+
78392+ if (ptr < base + align)
78393+ break;
78394+
78395+ offset = ptr - base - align;
78396+ if (offset >= m) {
78397+ base += size;
78398+ continue;
78399+ }
78400+
78401+ if (n > m - offset)
78402+ break;
78403+
78404+ spin_unlock_irqrestore(&slob_lock, flags);
78405+ return;
78406+ }
78407+
78408+ spin_unlock_irqrestore(&slob_lock, flags);
78409+report:
78410+ pax_report_usercopy(ptr, n, to, type);
78411+#endif
78412+
78413+}
78414+EXPORT_SYMBOL(check_object_size);
78415+
78416 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
78417 size_t ksize(const void *block)
78418 {
78419@@ -547,10 +640,10 @@ size_t ksize(const void *block)
78420 sp = slob_page(block);
78421 if (is_slob_page(sp)) {
78422 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78423- unsigned int *m = (unsigned int *)(block - align);
78424- return SLOB_UNITS(*m) * SLOB_UNIT;
78425+ slob_t *m = (slob_t *)(block - align);
78426+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
78427 } else
78428- return sp->page.private;
78429+ return sp->size;
78430 }
78431 EXPORT_SYMBOL(ksize);
78432
78433@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78434 {
78435 struct kmem_cache *c;
78436
78437+#ifdef CONFIG_PAX_USERCOPY
78438+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
78439+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
78440+#else
78441 c = slob_alloc(sizeof(struct kmem_cache),
78442 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
78443+#endif
78444
78445 if (c) {
78446 c->name = name;
78447@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
78448 {
78449 void *b;
78450
78451+#ifdef CONFIG_PAX_USERCOPY
78452+ b = __kmalloc_node_align(c->size, flags, node, c->align);
78453+#else
78454 if (c->size < PAGE_SIZE) {
78455 b = slob_alloc(c->size, flags, c->align, node);
78456 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
78457 SLOB_UNITS(c->size) * SLOB_UNIT,
78458 flags, node);
78459 } else {
78460+ struct slob_page *sp;
78461+
78462 b = slob_new_pages(flags, get_order(c->size), node);
78463+ sp = slob_page(b);
78464+ sp->size = c->size;
78465 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
78466 PAGE_SIZE << get_order(c->size),
78467 flags, node);
78468 }
78469+#endif
78470
78471 if (c->ctor)
78472 c->ctor(b);
78473@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
78474
78475 static void __kmem_cache_free(void *b, int size)
78476 {
78477- if (size < PAGE_SIZE)
78478+ struct slob_page *sp = slob_page(b);
78479+
78480+ if (is_slob_page(sp))
78481 slob_free(b, size);
78482- else
78483+ else {
78484+ clear_slob_page(sp);
78485+ free_slob_page(sp);
78486+ sp->size = 0;
78487 slob_free_pages(b, get_order(size));
78488+ }
78489 }
78490
78491 static void kmem_rcu_free(struct rcu_head *head)
78492@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
78493
78494 void kmem_cache_free(struct kmem_cache *c, void *b)
78495 {
78496+ int size = c->size;
78497+
78498+#ifdef CONFIG_PAX_USERCOPY
78499+ if (size + c->align < PAGE_SIZE) {
78500+ size += c->align;
78501+ b -= c->align;
78502+ }
78503+#endif
78504+
78505 kmemleak_free_recursive(b, c->flags);
78506 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
78507 struct slob_rcu *slob_rcu;
78508- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
78509+ slob_rcu = b + (size - sizeof(struct slob_rcu));
78510 INIT_RCU_HEAD(&slob_rcu->head);
78511- slob_rcu->size = c->size;
78512+ slob_rcu->size = size;
78513 call_rcu(&slob_rcu->head, kmem_rcu_free);
78514 } else {
78515- __kmem_cache_free(b, c->size);
78516+ __kmem_cache_free(b, size);
78517 }
78518
78519+#ifdef CONFIG_PAX_USERCOPY
78520+ trace_kfree(_RET_IP_, b);
78521+#else
78522 trace_kmem_cache_free(_RET_IP_, b);
78523+#endif
78524+
78525 }
78526 EXPORT_SYMBOL(kmem_cache_free);
78527
78528diff --git a/mm/slub.c b/mm/slub.c
78529index 4996fc7..87e01d0 100644
78530--- a/mm/slub.c
78531+++ b/mm/slub.c
78532@@ -201,7 +201,7 @@ struct track {
78533
78534 enum track_item { TRACK_ALLOC, TRACK_FREE };
78535
78536-#ifdef CONFIG_SLUB_DEBUG
78537+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78538 static int sysfs_slab_add(struct kmem_cache *);
78539 static int sysfs_slab_alias(struct kmem_cache *, const char *);
78540 static void sysfs_slab_remove(struct kmem_cache *);
78541@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
78542 if (!t->addr)
78543 return;
78544
78545- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
78546+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
78547 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
78548 }
78549
78550@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
78551
78552 page = virt_to_head_page(x);
78553
78554+ BUG_ON(!PageSlab(page));
78555+
78556 slab_free(s, page, x, _RET_IP_);
78557
78558 trace_kmem_cache_free(_RET_IP_, x);
78559@@ -1937,7 +1939,7 @@ static int slub_min_objects;
78560 * Merge control. If this is set then no merging of slab caches will occur.
78561 * (Could be removed. This was introduced to pacify the merge skeptics.)
78562 */
78563-static int slub_nomerge;
78564+static int slub_nomerge = 1;
78565
78566 /*
78567 * Calculate the order of allocation given an slab object size.
78568@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
78569 * list to avoid pounding the page allocator excessively.
78570 */
78571 set_min_partial(s, ilog2(s->size));
78572- s->refcount = 1;
78573+ atomic_set(&s->refcount, 1);
78574 #ifdef CONFIG_NUMA
78575 s->remote_node_defrag_ratio = 1000;
78576 #endif
78577@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
78578 void kmem_cache_destroy(struct kmem_cache *s)
78579 {
78580 down_write(&slub_lock);
78581- s->refcount--;
78582- if (!s->refcount) {
78583+ if (atomic_dec_and_test(&s->refcount)) {
78584 list_del(&s->list);
78585 up_write(&slub_lock);
78586 if (kmem_cache_close(s)) {
78587@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
78588 __setup("slub_nomerge", setup_slub_nomerge);
78589
78590 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
78591- const char *name, int size, gfp_t gfp_flags)
78592+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
78593 {
78594- unsigned int flags = 0;
78595-
78596 if (gfp_flags & SLUB_DMA)
78597- flags = SLAB_CACHE_DMA;
78598+ flags |= SLAB_CACHE_DMA;
78599
78600 /*
78601 * This function is called with IRQs disabled during early-boot on
78602@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
78603 EXPORT_SYMBOL(__kmalloc_node);
78604 #endif
78605
78606+void check_object_size(const void *ptr, unsigned long n, bool to)
78607+{
78608+
78609+#ifdef CONFIG_PAX_USERCOPY
78610+ struct page *page;
78611+ struct kmem_cache *s = NULL;
78612+ unsigned long offset;
78613+ const char *type;
78614+
78615+ if (!n)
78616+ return;
78617+
78618+ type = "<null>";
78619+ if (ZERO_OR_NULL_PTR(ptr))
78620+ goto report;
78621+
78622+ if (!virt_addr_valid(ptr))
78623+ return;
78624+
78625+ page = get_object_page(ptr);
78626+
78627+ type = "<process stack>";
78628+ if (!page) {
78629+ if (object_is_on_stack(ptr, n) == -1)
78630+ goto report;
78631+ return;
78632+ }
78633+
78634+ s = page->slab;
78635+ type = s->name;
78636+ if (!(s->flags & SLAB_USERCOPY))
78637+ goto report;
78638+
78639+ offset = (ptr - page_address(page)) % s->size;
78640+ if (offset <= s->objsize && n <= s->objsize - offset)
78641+ return;
78642+
78643+report:
78644+ pax_report_usercopy(ptr, n, to, type);
78645+#endif
78646+
78647+}
78648+EXPORT_SYMBOL(check_object_size);
78649+
78650 size_t ksize(const void *object)
78651 {
78652 struct page *page;
78653@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
78654 * kmem_cache_open for slab_state == DOWN.
78655 */
78656 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
78657- sizeof(struct kmem_cache_node), GFP_NOWAIT);
78658- kmalloc_caches[0].refcount = -1;
78659+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
78660+ atomic_set(&kmalloc_caches[0].refcount, -1);
78661 caches++;
78662
78663 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
78664@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
78665 /* Caches that are not of the two-to-the-power-of size */
78666 if (KMALLOC_MIN_SIZE <= 32) {
78667 create_kmalloc_cache(&kmalloc_caches[1],
78668- "kmalloc-96", 96, GFP_NOWAIT);
78669+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
78670 caches++;
78671 }
78672 if (KMALLOC_MIN_SIZE <= 64) {
78673 create_kmalloc_cache(&kmalloc_caches[2],
78674- "kmalloc-192", 192, GFP_NOWAIT);
78675+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
78676 caches++;
78677 }
78678
78679 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
78680 create_kmalloc_cache(&kmalloc_caches[i],
78681- "kmalloc", 1 << i, GFP_NOWAIT);
78682+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
78683 caches++;
78684 }
78685
78686@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
78687 /*
78688 * We may have set a slab to be unmergeable during bootstrap.
78689 */
78690- if (s->refcount < 0)
78691+ if (atomic_read(&s->refcount) < 0)
78692 return 1;
78693
78694 return 0;
78695@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78696 if (s) {
78697 int cpu;
78698
78699- s->refcount++;
78700+ atomic_inc(&s->refcount);
78701 /*
78702 * Adjust the object sizes so that we clear
78703 * the complete object on kzalloc.
78704@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78705
78706 if (sysfs_slab_alias(s, name)) {
78707 down_write(&slub_lock);
78708- s->refcount--;
78709+ atomic_dec(&s->refcount);
78710 up_write(&slub_lock);
78711 goto err;
78712 }
78713@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
78714
78715 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
78716 {
78717- return sprintf(buf, "%d\n", s->refcount - 1);
78718+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
78719 }
78720 SLAB_ATTR_RO(aliases);
78721
78722@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
78723 kfree(s);
78724 }
78725
78726-static struct sysfs_ops slab_sysfs_ops = {
78727+static const struct sysfs_ops slab_sysfs_ops = {
78728 .show = slab_attr_show,
78729 .store = slab_attr_store,
78730 };
78731@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
78732 return 0;
78733 }
78734
78735-static struct kset_uevent_ops slab_uevent_ops = {
78736+static const struct kset_uevent_ops slab_uevent_ops = {
78737 .filter = uevent_filter,
78738 };
78739
78740@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
78741 return name;
78742 }
78743
78744+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78745 static int sysfs_slab_add(struct kmem_cache *s)
78746 {
78747 int err;
78748@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
78749 kobject_del(&s->kobj);
78750 kobject_put(&s->kobj);
78751 }
78752+#endif
78753
78754 /*
78755 * Need to buffer aliases during bootup until sysfs becomes
78756@@ -4632,6 +4677,7 @@ struct saved_alias {
78757
78758 static struct saved_alias *alias_list;
78759
78760+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78761 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
78762 {
78763 struct saved_alias *al;
78764@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
78765 alias_list = al;
78766 return 0;
78767 }
78768+#endif
78769
78770 static int __init slab_sysfs_init(void)
78771 {
78772@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
78773
78774 static int __init slab_proc_init(void)
78775 {
78776- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
78777+ mode_t gr_mode = S_IRUGO;
78778+
78779+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78780+ gr_mode = S_IRUSR;
78781+#endif
78782+
78783+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
78784 return 0;
78785 }
78786 module_init(slab_proc_init);
78787diff --git a/mm/swap.c b/mm/swap.c
78788index 308e57d..5de19c0 100644
78789--- a/mm/swap.c
78790+++ b/mm/swap.c
78791@@ -30,6 +30,7 @@
78792 #include <linux/notifier.h>
78793 #include <linux/backing-dev.h>
78794 #include <linux/memcontrol.h>
78795+#include <linux/hugetlb.h>
78796
78797 #include "internal.h"
78798
78799@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
78800 compound_page_dtor *dtor;
78801
78802 dtor = get_compound_page_dtor(page);
78803+ if (!PageHuge(page))
78804+ BUG_ON(dtor != free_compound_page);
78805 (*dtor)(page);
78806 }
78807 }
78808diff --git a/mm/util.c b/mm/util.c
78809index b377ce4..3a891af 100644
78810--- a/mm/util.c
78811+++ b/mm/util.c
78812@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
78813 void arch_pick_mmap_layout(struct mm_struct *mm)
78814 {
78815 mm->mmap_base = TASK_UNMAPPED_BASE;
78816+
78817+#ifdef CONFIG_PAX_RANDMMAP
78818+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78819+ mm->mmap_base += mm->delta_mmap;
78820+#endif
78821+
78822 mm->get_unmapped_area = arch_get_unmapped_area;
78823 mm->unmap_area = arch_unmap_area;
78824 }
78825diff --git a/mm/vmalloc.c b/mm/vmalloc.c
78826index f34ffd0..28e94b7 100644
78827--- a/mm/vmalloc.c
78828+++ b/mm/vmalloc.c
78829@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
78830
78831 pte = pte_offset_kernel(pmd, addr);
78832 do {
78833- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78834- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78835+
78836+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78837+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
78838+ BUG_ON(!pte_exec(*pte));
78839+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
78840+ continue;
78841+ }
78842+#endif
78843+
78844+ {
78845+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78846+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78847+ }
78848 } while (pte++, addr += PAGE_SIZE, addr != end);
78849 }
78850
78851@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78852 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
78853 {
78854 pte_t *pte;
78855+ int ret = -ENOMEM;
78856
78857 /*
78858 * nr is a running index into the array which helps higher level
78859@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78860 pte = pte_alloc_kernel(pmd, addr);
78861 if (!pte)
78862 return -ENOMEM;
78863+
78864+ pax_open_kernel();
78865 do {
78866 struct page *page = pages[*nr];
78867
78868- if (WARN_ON(!pte_none(*pte)))
78869- return -EBUSY;
78870- if (WARN_ON(!page))
78871- return -ENOMEM;
78872+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78873+ if (!(pgprot_val(prot) & _PAGE_NX))
78874+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
78875+ else
78876+#endif
78877+
78878+ if (WARN_ON(!pte_none(*pte))) {
78879+ ret = -EBUSY;
78880+ goto out;
78881+ }
78882+ if (WARN_ON(!page)) {
78883+ ret = -ENOMEM;
78884+ goto out;
78885+ }
78886 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
78887 (*nr)++;
78888 } while (pte++, addr += PAGE_SIZE, addr != end);
78889- return 0;
78890+ ret = 0;
78891+out:
78892+ pax_close_kernel();
78893+ return ret;
78894 }
78895
78896 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
78897@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
78898 * and fall back on vmalloc() if that fails. Others
78899 * just put it in the vmalloc space.
78900 */
78901-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
78902+#ifdef CONFIG_MODULES
78903+#ifdef MODULES_VADDR
78904 unsigned long addr = (unsigned long)x;
78905 if (addr >= MODULES_VADDR && addr < MODULES_END)
78906 return 1;
78907 #endif
78908+
78909+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78910+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
78911+ return 1;
78912+#endif
78913+
78914+#endif
78915+
78916 return is_vmalloc_addr(x);
78917 }
78918
78919@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
78920
78921 if (!pgd_none(*pgd)) {
78922 pud_t *pud = pud_offset(pgd, addr);
78923+#ifdef CONFIG_X86
78924+ if (!pud_large(*pud))
78925+#endif
78926 if (!pud_none(*pud)) {
78927 pmd_t *pmd = pmd_offset(pud, addr);
78928+#ifdef CONFIG_X86
78929+ if (!pmd_large(*pmd))
78930+#endif
78931 if (!pmd_none(*pmd)) {
78932 pte_t *ptep, pte;
78933
78934@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
78935 struct rb_node *tmp;
78936
78937 while (*p) {
78938- struct vmap_area *tmp;
78939+ struct vmap_area *varea;
78940
78941 parent = *p;
78942- tmp = rb_entry(parent, struct vmap_area, rb_node);
78943- if (va->va_start < tmp->va_end)
78944+ varea = rb_entry(parent, struct vmap_area, rb_node);
78945+ if (va->va_start < varea->va_end)
78946 p = &(*p)->rb_left;
78947- else if (va->va_end > tmp->va_start)
78948+ else if (va->va_end > varea->va_start)
78949 p = &(*p)->rb_right;
78950 else
78951 BUG();
78952@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
78953 struct vm_struct *area;
78954
78955 BUG_ON(in_interrupt());
78956+
78957+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78958+ if (flags & VM_KERNEXEC) {
78959+ if (start != VMALLOC_START || end != VMALLOC_END)
78960+ return NULL;
78961+ start = (unsigned long)MODULES_EXEC_VADDR;
78962+ end = (unsigned long)MODULES_EXEC_END;
78963+ }
78964+#endif
78965+
78966 if (flags & VM_IOREMAP) {
78967 int bit = fls(size);
78968
78969@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
78970 if (count > totalram_pages)
78971 return NULL;
78972
78973+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78974+ if (!(pgprot_val(prot) & _PAGE_NX))
78975+ flags |= VM_KERNEXEC;
78976+#endif
78977+
78978 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
78979 __builtin_return_address(0));
78980 if (!area)
78981@@ -1594,6 +1651,13 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
78982 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
78983 return NULL;
78984
78985+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78986+ if (!(pgprot_val(prot) & _PAGE_NX))
78987+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
78988+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
78989+ else
78990+#endif
78991+
78992 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
78993 VMALLOC_START, VMALLOC_END, node,
78994 gfp_mask, caller);
78995@@ -1619,6 +1683,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
78996 return addr;
78997 }
78998
78999+#undef __vmalloc
79000 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
79001 {
79002 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
79003@@ -1635,6 +1700,7 @@ EXPORT_SYMBOL(__vmalloc);
79004 * For tight control over page level allocator and protection flags
79005 * use __vmalloc() instead.
79006 */
79007+#undef vmalloc
79008 void *vmalloc(unsigned long size)
79009 {
79010 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79011@@ -1649,6 +1715,7 @@ EXPORT_SYMBOL(vmalloc);
79012 * The resulting memory area is zeroed so it can be mapped to userspace
79013 * without leaking data.
79014 */
79015+#undef vmalloc_user
79016 void *vmalloc_user(unsigned long size)
79017 {
79018 struct vm_struct *area;
79019@@ -1676,6 +1743,7 @@ EXPORT_SYMBOL(vmalloc_user);
79020 * For tight control over page level allocator and protection flags
79021 * use __vmalloc() instead.
79022 */
79023+#undef vmalloc_node
79024 void *vmalloc_node(unsigned long size, int node)
79025 {
79026 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79027@@ -1698,10 +1766,10 @@ EXPORT_SYMBOL(vmalloc_node);
79028 * For tight control over page level allocator and protection flags
79029 * use __vmalloc() instead.
79030 */
79031-
79032+#undef vmalloc_exec
79033 void *vmalloc_exec(unsigned long size)
79034 {
79035- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
79036+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
79037 -1, __builtin_return_address(0));
79038 }
79039
79040@@ -1720,6 +1788,7 @@ void *vmalloc_exec(unsigned long size)
79041 * Allocate enough 32bit PA addressable pages to cover @size from the
79042 * page level allocator and map them into contiguous kernel virtual space.
79043 */
79044+#undef vmalloc_32
79045 void *vmalloc_32(unsigned long size)
79046 {
79047 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
79048@@ -1734,6 +1803,7 @@ EXPORT_SYMBOL(vmalloc_32);
79049 * The resulting memory area is 32bit addressable and zeroed so it can be
79050 * mapped to userspace without leaking data.
79051 */
79052+#undef vmalloc_32_user
79053 void *vmalloc_32_user(unsigned long size)
79054 {
79055 struct vm_struct *area;
79056@@ -1998,6 +2068,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
79057 unsigned long uaddr = vma->vm_start;
79058 unsigned long usize = vma->vm_end - vma->vm_start;
79059
79060+ BUG_ON(vma->vm_mirror);
79061+
79062 if ((PAGE_SIZE-1) & (unsigned long)addr)
79063 return -EINVAL;
79064
79065diff --git a/mm/vmstat.c b/mm/vmstat.c
79066index 42d76c6..5643dc4 100644
79067--- a/mm/vmstat.c
79068+++ b/mm/vmstat.c
79069@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
79070 *
79071 * vm_stat contains the global counters
79072 */
79073-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79074+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79075 EXPORT_SYMBOL(vm_stat);
79076
79077 #ifdef CONFIG_SMP
79078@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
79079 v = p->vm_stat_diff[i];
79080 p->vm_stat_diff[i] = 0;
79081 local_irq_restore(flags);
79082- atomic_long_add(v, &zone->vm_stat[i]);
79083+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
79084 global_diff[i] += v;
79085 #ifdef CONFIG_NUMA
79086 /* 3 seconds idle till flush */
79087@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
79088
79089 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
79090 if (global_diff[i])
79091- atomic_long_add(global_diff[i], &vm_stat[i]);
79092+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
79093 }
79094
79095 #endif
79096@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
79097 start_cpu_timer(cpu);
79098 #endif
79099 #ifdef CONFIG_PROC_FS
79100- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
79101- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
79102- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
79103- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
79104+ {
79105+ mode_t gr_mode = S_IRUGO;
79106+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79107+ gr_mode = S_IRUSR;
79108+#endif
79109+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
79110+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
79111+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79112+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
79113+#else
79114+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
79115+#endif
79116+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
79117+ }
79118 #endif
79119 return 0;
79120 }
79121diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
79122index a29c5ab..6143f20 100644
79123--- a/net/8021q/vlan.c
79124+++ b/net/8021q/vlan.c
79125@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
79126 err = -EPERM;
79127 if (!capable(CAP_NET_ADMIN))
79128 break;
79129- if ((args.u.name_type >= 0) &&
79130- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
79131+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
79132 struct vlan_net *vn;
79133
79134 vn = net_generic(net, vlan_net_id);
79135diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
79136index a2d2984..f9eb711 100644
79137--- a/net/9p/trans_fd.c
79138+++ b/net/9p/trans_fd.c
79139@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
79140 oldfs = get_fs();
79141 set_fs(get_ds());
79142 /* The cast to a user pointer is valid due to the set_fs() */
79143- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
79144+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
79145 set_fs(oldfs);
79146
79147 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
79148diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
79149index 02cc7e7..4514f1b 100644
79150--- a/net/atm/atm_misc.c
79151+++ b/net/atm/atm_misc.c
79152@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
79153 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
79154 return 1;
79155 atm_return(vcc,truesize);
79156- atomic_inc(&vcc->stats->rx_drop);
79157+ atomic_inc_unchecked(&vcc->stats->rx_drop);
79158 return 0;
79159 }
79160
79161@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
79162 }
79163 }
79164 atm_return(vcc,guess);
79165- atomic_inc(&vcc->stats->rx_drop);
79166+ atomic_inc_unchecked(&vcc->stats->rx_drop);
79167 return NULL;
79168 }
79169
79170@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
79171
79172 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79173 {
79174-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79175+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79176 __SONET_ITEMS
79177 #undef __HANDLE_ITEM
79178 }
79179@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79180
79181 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79182 {
79183-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
79184+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
79185 __SONET_ITEMS
79186 #undef __HANDLE_ITEM
79187 }
79188diff --git a/net/atm/lec.h b/net/atm/lec.h
79189index 9d14d19..5c145f3 100644
79190--- a/net/atm/lec.h
79191+++ b/net/atm/lec.h
79192@@ -48,7 +48,7 @@ struct lane2_ops {
79193 const u8 *tlvs, u32 sizeoftlvs);
79194 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
79195 const u8 *tlvs, u32 sizeoftlvs);
79196-};
79197+} __no_const;
79198
79199 /*
79200 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
79201diff --git a/net/atm/mpc.h b/net/atm/mpc.h
79202index 0919a88..a23d54e 100644
79203--- a/net/atm/mpc.h
79204+++ b/net/atm/mpc.h
79205@@ -33,7 +33,7 @@ struct mpoa_client {
79206 struct mpc_parameters parameters; /* parameters for this client */
79207
79208 const struct net_device_ops *old_ops;
79209- struct net_device_ops new_ops;
79210+ net_device_ops_no_const new_ops;
79211 };
79212
79213
79214diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
79215index 4504a4b..1733f1e 100644
79216--- a/net/atm/mpoa_caches.c
79217+++ b/net/atm/mpoa_caches.c
79218@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
79219 struct timeval now;
79220 struct k_message msg;
79221
79222+ pax_track_stack();
79223+
79224 do_gettimeofday(&now);
79225
79226 write_lock_irq(&client->egress_lock);
79227diff --git a/net/atm/proc.c b/net/atm/proc.c
79228index ab8419a..aa91497 100644
79229--- a/net/atm/proc.c
79230+++ b/net/atm/proc.c
79231@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
79232 const struct k_atm_aal_stats *stats)
79233 {
79234 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
79235- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
79236- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
79237- atomic_read(&stats->rx_drop));
79238+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
79239+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
79240+ atomic_read_unchecked(&stats->rx_drop));
79241 }
79242
79243 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
79244@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
79245 {
79246 struct sock *sk = sk_atm(vcc);
79247
79248+#ifdef CONFIG_GRKERNSEC_HIDESYM
79249+ seq_printf(seq, "%p ", NULL);
79250+#else
79251 seq_printf(seq, "%p ", vcc);
79252+#endif
79253+
79254 if (!vcc->dev)
79255 seq_printf(seq, "Unassigned ");
79256 else
79257@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
79258 {
79259 if (!vcc->dev)
79260 seq_printf(seq, sizeof(void *) == 4 ?
79261+#ifdef CONFIG_GRKERNSEC_HIDESYM
79262+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
79263+#else
79264 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
79265+#endif
79266 else
79267 seq_printf(seq, "%3d %3d %5d ",
79268 vcc->dev->number, vcc->vpi, vcc->vci);
79269diff --git a/net/atm/resources.c b/net/atm/resources.c
79270index 56b7322..c48b84e 100644
79271--- a/net/atm/resources.c
79272+++ b/net/atm/resources.c
79273@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
79274 static void copy_aal_stats(struct k_atm_aal_stats *from,
79275 struct atm_aal_stats *to)
79276 {
79277-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79278+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79279 __AAL_STAT_ITEMS
79280 #undef __HANDLE_ITEM
79281 }
79282@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
79283 static void subtract_aal_stats(struct k_atm_aal_stats *from,
79284 struct atm_aal_stats *to)
79285 {
79286-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
79287+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
79288 __AAL_STAT_ITEMS
79289 #undef __HANDLE_ITEM
79290 }
79291diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
79292index 8567d47..bba2292 100644
79293--- a/net/bridge/br_private.h
79294+++ b/net/bridge/br_private.h
79295@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
79296
79297 #ifdef CONFIG_SYSFS
79298 /* br_sysfs_if.c */
79299-extern struct sysfs_ops brport_sysfs_ops;
79300+extern const struct sysfs_ops brport_sysfs_ops;
79301 extern int br_sysfs_addif(struct net_bridge_port *p);
79302
79303 /* br_sysfs_br.c */
79304diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
79305index 9a52ac5..c97538e 100644
79306--- a/net/bridge/br_stp_if.c
79307+++ b/net/bridge/br_stp_if.c
79308@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
79309 char *envp[] = { NULL };
79310
79311 if (br->stp_enabled == BR_USER_STP) {
79312- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
79313+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
79314 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
79315 br->dev->name, r);
79316
79317diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
79318index 820643a..ce77fb3 100644
79319--- a/net/bridge/br_sysfs_if.c
79320+++ b/net/bridge/br_sysfs_if.c
79321@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
79322 return ret;
79323 }
79324
79325-struct sysfs_ops brport_sysfs_ops = {
79326+const struct sysfs_ops brport_sysfs_ops = {
79327 .show = brport_show,
79328 .store = brport_store,
79329 };
79330diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
79331index d73d47f..72df42a 100644
79332--- a/net/bridge/netfilter/ebtables.c
79333+++ b/net/bridge/netfilter/ebtables.c
79334@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
79335 unsigned int entries_size, nentries;
79336 char *entries;
79337
79338+ pax_track_stack();
79339+
79340 if (cmd == EBT_SO_GET_ENTRIES) {
79341 entries_size = t->private->entries_size;
79342 nentries = t->private->nentries;
79343diff --git a/net/can/bcm.c b/net/can/bcm.c
79344index 2ffd2e0..72a7486 100644
79345--- a/net/can/bcm.c
79346+++ b/net/can/bcm.c
79347@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
79348 struct bcm_sock *bo = bcm_sk(sk);
79349 struct bcm_op *op;
79350
79351+#ifdef CONFIG_GRKERNSEC_HIDESYM
79352+ seq_printf(m, ">>> socket %p", NULL);
79353+ seq_printf(m, " / sk %p", NULL);
79354+ seq_printf(m, " / bo %p", NULL);
79355+#else
79356 seq_printf(m, ">>> socket %p", sk->sk_socket);
79357 seq_printf(m, " / sk %p", sk);
79358 seq_printf(m, " / bo %p", bo);
79359+#endif
79360 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
79361 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
79362 seq_printf(m, " <<<\n");
79363diff --git a/net/compat.c b/net/compat.c
79364index 9559afc..ccd74e1 100644
79365--- a/net/compat.c
79366+++ b/net/compat.c
79367@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
79368 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
79369 __get_user(kmsg->msg_flags, &umsg->msg_flags))
79370 return -EFAULT;
79371- kmsg->msg_name = compat_ptr(tmp1);
79372- kmsg->msg_iov = compat_ptr(tmp2);
79373- kmsg->msg_control = compat_ptr(tmp3);
79374+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
79375+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
79376+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
79377 return 0;
79378 }
79379
79380@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79381 kern_msg->msg_name = NULL;
79382
79383 tot_len = iov_from_user_compat_to_kern(kern_iov,
79384- (struct compat_iovec __user *)kern_msg->msg_iov,
79385+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
79386 kern_msg->msg_iovlen);
79387 if (tot_len >= 0)
79388 kern_msg->msg_iov = kern_iov;
79389@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79390
79391 #define CMSG_COMPAT_FIRSTHDR(msg) \
79392 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
79393- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
79394+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
79395 (struct compat_cmsghdr __user *)NULL)
79396
79397 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
79398 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
79399 (ucmlen) <= (unsigned long) \
79400 ((mhdr)->msg_controllen - \
79401- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
79402+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
79403
79404 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
79405 struct compat_cmsghdr __user *cmsg, int cmsg_len)
79406 {
79407 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
79408- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
79409+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
79410 msg->msg_controllen)
79411 return NULL;
79412 return (struct compat_cmsghdr __user *)ptr;
79413@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79414 {
79415 struct compat_timeval ctv;
79416 struct compat_timespec cts[3];
79417- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79418+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79419 struct compat_cmsghdr cmhdr;
79420 int cmlen;
79421
79422@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79423
79424 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
79425 {
79426- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79427+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79428 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
79429 int fdnum = scm->fp->count;
79430 struct file **fp = scm->fp->fp;
79431@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
79432 len = sizeof(ktime);
79433 old_fs = get_fs();
79434 set_fs(KERNEL_DS);
79435- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
79436+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
79437 set_fs(old_fs);
79438
79439 if (!err) {
79440@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79441 case MCAST_JOIN_GROUP:
79442 case MCAST_LEAVE_GROUP:
79443 {
79444- struct compat_group_req __user *gr32 = (void *)optval;
79445+ struct compat_group_req __user *gr32 = (void __user *)optval;
79446 struct group_req __user *kgr =
79447 compat_alloc_user_space(sizeof(struct group_req));
79448 u32 interface;
79449@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79450 case MCAST_BLOCK_SOURCE:
79451 case MCAST_UNBLOCK_SOURCE:
79452 {
79453- struct compat_group_source_req __user *gsr32 = (void *)optval;
79454+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
79455 struct group_source_req __user *kgsr = compat_alloc_user_space(
79456 sizeof(struct group_source_req));
79457 u32 interface;
79458@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79459 }
79460 case MCAST_MSFILTER:
79461 {
79462- struct compat_group_filter __user *gf32 = (void *)optval;
79463+ struct compat_group_filter __user *gf32 = (void __user *)optval;
79464 struct group_filter __user *kgf;
79465 u32 interface, fmode, numsrc;
79466
79467diff --git a/net/core/dev.c b/net/core/dev.c
79468index 64eb849..7b5948b 100644
79469--- a/net/core/dev.c
79470+++ b/net/core/dev.c
79471@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
79472 if (no_module && capable(CAP_NET_ADMIN))
79473 no_module = request_module("netdev-%s", name);
79474 if (no_module && capable(CAP_SYS_MODULE)) {
79475+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79476+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
79477+#else
79478 if (!request_module("%s", name))
79479 pr_err("Loading kernel module for a network device "
79480 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
79481 "instead\n", name);
79482+#endif
79483 }
79484 }
79485 EXPORT_SYMBOL(dev_load);
79486@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
79487
79488 struct dev_gso_cb {
79489 void (*destructor)(struct sk_buff *skb);
79490-};
79491+} __no_const;
79492
79493 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
79494
79495@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
79496 }
79497 EXPORT_SYMBOL(netif_rx_ni);
79498
79499-static void net_tx_action(struct softirq_action *h)
79500+static void net_tx_action(void)
79501 {
79502 struct softnet_data *sd = &__get_cpu_var(softnet_data);
79503
79504@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *napi)
79505 EXPORT_SYMBOL(netif_napi_del);
79506
79507
79508-static void net_rx_action(struct softirq_action *h)
79509+static void net_rx_action(void)
79510 {
79511 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
79512 unsigned long time_limit = jiffies + 2;
79513diff --git a/net/core/flow.c b/net/core/flow.c
79514index 9601587..8c4824e 100644
79515--- a/net/core/flow.c
79516+++ b/net/core/flow.c
79517@@ -35,11 +35,11 @@ struct flow_cache_entry {
79518 atomic_t *object_ref;
79519 };
79520
79521-atomic_t flow_cache_genid = ATOMIC_INIT(0);
79522+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
79523
79524 static u32 flow_hash_shift;
79525 #define flow_hash_size (1 << flow_hash_shift)
79526-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
79527+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
79528
79529 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
79530
79531@@ -52,7 +52,7 @@ struct flow_percpu_info {
79532 u32 hash_rnd;
79533 int count;
79534 };
79535-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
79536+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
79537
79538 #define flow_hash_rnd_recalc(cpu) \
79539 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
79540@@ -69,7 +69,7 @@ struct flow_flush_info {
79541 atomic_t cpuleft;
79542 struct completion completion;
79543 };
79544-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
79545+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
79546
79547 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
79548
79549@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
79550 if (fle->family == family &&
79551 fle->dir == dir &&
79552 flow_key_compare(key, &fle->key) == 0) {
79553- if (fle->genid == atomic_read(&flow_cache_genid)) {
79554+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
79555 void *ret = fle->object;
79556
79557 if (ret)
79558@@ -228,7 +228,7 @@ nocache:
79559 err = resolver(net, key, family, dir, &obj, &obj_ref);
79560
79561 if (fle && !err) {
79562- fle->genid = atomic_read(&flow_cache_genid);
79563+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
79564
79565 if (fle->object)
79566 atomic_dec(fle->object_ref);
79567@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
79568
79569 fle = flow_table(cpu)[i];
79570 for (; fle; fle = fle->next) {
79571- unsigned genid = atomic_read(&flow_cache_genid);
79572+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
79573
79574 if (!fle->object || fle->genid == genid)
79575 continue;
79576diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
79577index d4fd895..ac9b1e6 100644
79578--- a/net/core/rtnetlink.c
79579+++ b/net/core/rtnetlink.c
79580@@ -57,7 +57,7 @@ struct rtnl_link
79581 {
79582 rtnl_doit_func doit;
79583 rtnl_dumpit_func dumpit;
79584-};
79585+} __no_const;
79586
79587 static DEFINE_MUTEX(rtnl_mutex);
79588
79589diff --git a/net/core/scm.c b/net/core/scm.c
79590index d98eafc..1a190a9 100644
79591--- a/net/core/scm.c
79592+++ b/net/core/scm.c
79593@@ -191,7 +191,7 @@ error:
79594 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79595 {
79596 struct cmsghdr __user *cm
79597- = (__force struct cmsghdr __user *)msg->msg_control;
79598+ = (struct cmsghdr __force_user *)msg->msg_control;
79599 struct cmsghdr cmhdr;
79600 int cmlen = CMSG_LEN(len);
79601 int err;
79602@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79603 err = -EFAULT;
79604 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
79605 goto out;
79606- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
79607+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
79608 goto out;
79609 cmlen = CMSG_SPACE(len);
79610 if (msg->msg_controllen < cmlen)
79611@@ -229,7 +229,7 @@ out:
79612 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79613 {
79614 struct cmsghdr __user *cm
79615- = (__force struct cmsghdr __user*)msg->msg_control;
79616+ = (struct cmsghdr __force_user *)msg->msg_control;
79617
79618 int fdmax = 0;
79619 int fdnum = scm->fp->count;
79620@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79621 if (fdnum < fdmax)
79622 fdmax = fdnum;
79623
79624- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
79625+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
79626 i++, cmfptr++)
79627 {
79628 int new_fd;
79629diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
79630index 45329d7..626aaa6 100644
79631--- a/net/core/secure_seq.c
79632+++ b/net/core/secure_seq.c
79633@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
79634 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
79635
79636 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
79637- __be16 dport)
79638+ __be16 dport)
79639 {
79640 u32 secret[MD5_MESSAGE_BYTES / 4];
79641 u32 hash[MD5_DIGEST_WORDS];
79642@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
79643 secret[i] = net_secret[i];
79644
79645 md5_transform(hash, secret);
79646-
79647 return hash[0];
79648 }
79649 #endif
79650diff --git a/net/core/skbuff.c b/net/core/skbuff.c
79651index a807f8c..65f906f 100644
79652--- a/net/core/skbuff.c
79653+++ b/net/core/skbuff.c
79654@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
79655 struct sk_buff *frag_iter;
79656 struct sock *sk = skb->sk;
79657
79658+ pax_track_stack();
79659+
79660 /*
79661 * __skb_splice_bits() only fails if the output has no room left,
79662 * so no point in going over the frag_list for the error case.
79663diff --git a/net/core/sock.c b/net/core/sock.c
79664index 6605e75..3acebda 100644
79665--- a/net/core/sock.c
79666+++ b/net/core/sock.c
79667@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79668 break;
79669
79670 case SO_PEERCRED:
79671+ {
79672+ struct ucred peercred;
79673 if (len > sizeof(sk->sk_peercred))
79674 len = sizeof(sk->sk_peercred);
79675- if (copy_to_user(optval, &sk->sk_peercred, len))
79676+ peercred = sk->sk_peercred;
79677+ if (copy_to_user(optval, &peercred, len))
79678 return -EFAULT;
79679 goto lenout;
79680+ }
79681
79682 case SO_PEERNAME:
79683 {
79684@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
79685 */
79686 smp_wmb();
79687 atomic_set(&sk->sk_refcnt, 1);
79688- atomic_set(&sk->sk_drops, 0);
79689+ atomic_set_unchecked(&sk->sk_drops, 0);
79690 }
79691 EXPORT_SYMBOL(sock_init_data);
79692
79693diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
79694index 2036568..c55883d 100644
79695--- a/net/decnet/sysctl_net_decnet.c
79696+++ b/net/decnet/sysctl_net_decnet.c
79697@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
79698
79699 if (len > *lenp) len = *lenp;
79700
79701- if (copy_to_user(buffer, addr, len))
79702+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
79703 return -EFAULT;
79704
79705 *lenp = len;
79706@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
79707
79708 if (len > *lenp) len = *lenp;
79709
79710- if (copy_to_user(buffer, devname, len))
79711+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
79712 return -EFAULT;
79713
79714 *lenp = len;
79715diff --git a/net/econet/Kconfig b/net/econet/Kconfig
79716index 39a2d29..f39c0fe 100644
79717--- a/net/econet/Kconfig
79718+++ b/net/econet/Kconfig
79719@@ -4,7 +4,7 @@
79720
79721 config ECONET
79722 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
79723- depends on EXPERIMENTAL && INET
79724+ depends on EXPERIMENTAL && INET && BROKEN
79725 ---help---
79726 Econet is a fairly old and slow networking protocol mainly used by
79727 Acorn computers to access file and print servers. It uses native
79728diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
79729index a413b1b..380849c 100644
79730--- a/net/ieee802154/dgram.c
79731+++ b/net/ieee802154/dgram.c
79732@@ -318,7 +318,7 @@ out:
79733 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
79734 {
79735 if (sock_queue_rcv_skb(sk, skb) < 0) {
79736- atomic_inc(&sk->sk_drops);
79737+ atomic_inc_unchecked(&sk->sk_drops);
79738 kfree_skb(skb);
79739 return NET_RX_DROP;
79740 }
79741diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
79742index 30e74ee..bfc6ee0 100644
79743--- a/net/ieee802154/raw.c
79744+++ b/net/ieee802154/raw.c
79745@@ -206,7 +206,7 @@ out:
79746 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
79747 {
79748 if (sock_queue_rcv_skb(sk, skb) < 0) {
79749- atomic_inc(&sk->sk_drops);
79750+ atomic_inc_unchecked(&sk->sk_drops);
79751 kfree_skb(skb);
79752 return NET_RX_DROP;
79753 }
79754diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
79755index dba56d2..acee5d6 100644
79756--- a/net/ipv4/inet_diag.c
79757+++ b/net/ipv4/inet_diag.c
79758@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
79759 r->idiag_retrans = 0;
79760
79761 r->id.idiag_if = sk->sk_bound_dev_if;
79762+#ifdef CONFIG_GRKERNSEC_HIDESYM
79763+ r->id.idiag_cookie[0] = 0;
79764+ r->id.idiag_cookie[1] = 0;
79765+#else
79766 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
79767 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
79768+#endif
79769
79770 r->id.idiag_sport = inet->sport;
79771 r->id.idiag_dport = inet->dport;
79772@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
79773 r->idiag_family = tw->tw_family;
79774 r->idiag_retrans = 0;
79775 r->id.idiag_if = tw->tw_bound_dev_if;
79776+
79777+#ifdef CONFIG_GRKERNSEC_HIDESYM
79778+ r->id.idiag_cookie[0] = 0;
79779+ r->id.idiag_cookie[1] = 0;
79780+#else
79781 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
79782 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
79783+#endif
79784+
79785 r->id.idiag_sport = tw->tw_sport;
79786 r->id.idiag_dport = tw->tw_dport;
79787 r->id.idiag_src[0] = tw->tw_rcv_saddr;
79788@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
79789 if (sk == NULL)
79790 goto unlock;
79791
79792+#ifndef CONFIG_GRKERNSEC_HIDESYM
79793 err = -ESTALE;
79794 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
79795 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
79796 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
79797 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
79798 goto out;
79799+#endif
79800
79801 err = -ENOMEM;
79802 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
79803@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
79804 r->idiag_retrans = req->retrans;
79805
79806 r->id.idiag_if = sk->sk_bound_dev_if;
79807+
79808+#ifdef CONFIG_GRKERNSEC_HIDESYM
79809+ r->id.idiag_cookie[0] = 0;
79810+ r->id.idiag_cookie[1] = 0;
79811+#else
79812 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
79813 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
79814+#endif
79815
79816 tmo = req->expires - jiffies;
79817 if (tmo < 0)
79818diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
79819index d717267..56de7e7 100644
79820--- a/net/ipv4/inet_hashtables.c
79821+++ b/net/ipv4/inet_hashtables.c
79822@@ -18,12 +18,15 @@
79823 #include <linux/sched.h>
79824 #include <linux/slab.h>
79825 #include <linux/wait.h>
79826+#include <linux/security.h>
79827
79828 #include <net/inet_connection_sock.h>
79829 #include <net/inet_hashtables.h>
79830 #include <net/secure_seq.h>
79831 #include <net/ip.h>
79832
79833+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
79834+
79835 /*
79836 * Allocate and initialize a new local port bind bucket.
79837 * The bindhash mutex for snum's hash chain must be held here.
79838@@ -491,6 +494,8 @@ ok:
79839 }
79840 spin_unlock(&head->lock);
79841
79842+ gr_update_task_in_ip_table(current, inet_sk(sk));
79843+
79844 if (tw) {
79845 inet_twsk_deschedule(tw, death_row);
79846 inet_twsk_put(tw);
79847diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
79848index 13b229f..6956484 100644
79849--- a/net/ipv4/inetpeer.c
79850+++ b/net/ipv4/inetpeer.c
79851@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
79852 struct inet_peer *p, *n;
79853 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
79854
79855+ pax_track_stack();
79856+
79857 /* Look up for the address quickly. */
79858 read_lock_bh(&peer_pool_lock);
79859 p = lookup(daddr, NULL);
79860@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
79861 return NULL;
79862 n->v4daddr = daddr;
79863 atomic_set(&n->refcnt, 1);
79864- atomic_set(&n->rid, 0);
79865+ atomic_set_unchecked(&n->rid, 0);
79866 n->ip_id_count = secure_ip_id(daddr);
79867 n->tcp_ts_stamp = 0;
79868
79869diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
79870index d3fe10b..feeafc9 100644
79871--- a/net/ipv4/ip_fragment.c
79872+++ b/net/ipv4/ip_fragment.c
79873@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
79874 return 0;
79875
79876 start = qp->rid;
79877- end = atomic_inc_return(&peer->rid);
79878+ end = atomic_inc_return_unchecked(&peer->rid);
79879 qp->rid = end;
79880
79881 rc = qp->q.fragments && (end - start) > max;
79882diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
79883index e982b5c..f079d75 100644
79884--- a/net/ipv4/ip_sockglue.c
79885+++ b/net/ipv4/ip_sockglue.c
79886@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79887 int val;
79888 int len;
79889
79890+ pax_track_stack();
79891+
79892 if (level != SOL_IP)
79893 return -EOPNOTSUPP;
79894
79895@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79896 if (sk->sk_type != SOCK_STREAM)
79897 return -ENOPROTOOPT;
79898
79899- msg.msg_control = optval;
79900+ msg.msg_control = (void __force_kernel *)optval;
79901 msg.msg_controllen = len;
79902 msg.msg_flags = 0;
79903
79904diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
79905index f8d04c2..c1188f2 100644
79906--- a/net/ipv4/ipconfig.c
79907+++ b/net/ipv4/ipconfig.c
79908@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
79909
79910 mm_segment_t oldfs = get_fs();
79911 set_fs(get_ds());
79912- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79913+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79914 set_fs(oldfs);
79915 return res;
79916 }
79917@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
79918
79919 mm_segment_t oldfs = get_fs();
79920 set_fs(get_ds());
79921- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79922+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79923 set_fs(oldfs);
79924 return res;
79925 }
79926@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
79927
79928 mm_segment_t oldfs = get_fs();
79929 set_fs(get_ds());
79930- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
79931+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
79932 set_fs(oldfs);
79933 return res;
79934 }
79935diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
79936index c8b0cc3..4da5ae2 100644
79937--- a/net/ipv4/netfilter/arp_tables.c
79938+++ b/net/ipv4/netfilter/arp_tables.c
79939@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
79940 private = &tmp;
79941 }
79942 #endif
79943+ memset(&info, 0, sizeof(info));
79944 info.valid_hooks = t->valid_hooks;
79945 memcpy(info.hook_entry, private->hook_entry,
79946 sizeof(info.hook_entry));
79947diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
79948index c156db2..e772975 100644
79949--- a/net/ipv4/netfilter/ip_queue.c
79950+++ b/net/ipv4/netfilter/ip_queue.c
79951@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
79952
79953 if (v->data_len < sizeof(*user_iph))
79954 return 0;
79955+ if (v->data_len > 65535)
79956+ return -EMSGSIZE;
79957+
79958 diff = v->data_len - e->skb->len;
79959 if (diff < 0) {
79960 if (pskb_trim(e->skb, v->data_len))
79961@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
79962 static inline void
79963 __ipq_rcv_skb(struct sk_buff *skb)
79964 {
79965- int status, type, pid, flags, nlmsglen, skblen;
79966+ int status, type, pid, flags;
79967+ unsigned int nlmsglen, skblen;
79968 struct nlmsghdr *nlh;
79969
79970 skblen = skb->len;
79971diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
79972index 0606db1..02e7e4c 100644
79973--- a/net/ipv4/netfilter/ip_tables.c
79974+++ b/net/ipv4/netfilter/ip_tables.c
79975@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
79976 private = &tmp;
79977 }
79978 #endif
79979+ memset(&info, 0, sizeof(info));
79980 info.valid_hooks = t->valid_hooks;
79981 memcpy(info.hook_entry, private->hook_entry,
79982 sizeof(info.hook_entry));
79983diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
79984index d9521f6..3c3eb25 100644
79985--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
79986+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
79987@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
79988
79989 *len = 0;
79990
79991- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
79992+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
79993 if (*octets == NULL) {
79994 if (net_ratelimit())
79995 printk("OOM in bsalg (%d)\n", __LINE__);
79996diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
79997index ab996f9..3da5f96 100644
79998--- a/net/ipv4/raw.c
79999+++ b/net/ipv4/raw.c
80000@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80001 /* Charge it to the socket. */
80002
80003 if (sock_queue_rcv_skb(sk, skb) < 0) {
80004- atomic_inc(&sk->sk_drops);
80005+ atomic_inc_unchecked(&sk->sk_drops);
80006 kfree_skb(skb);
80007 return NET_RX_DROP;
80008 }
80009@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80010 int raw_rcv(struct sock *sk, struct sk_buff *skb)
80011 {
80012 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
80013- atomic_inc(&sk->sk_drops);
80014+ atomic_inc_unchecked(&sk->sk_drops);
80015 kfree_skb(skb);
80016 return NET_RX_DROP;
80017 }
80018@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
80019
80020 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
80021 {
80022+ struct icmp_filter filter;
80023+
80024+ if (optlen < 0)
80025+ return -EINVAL;
80026 if (optlen > sizeof(struct icmp_filter))
80027 optlen = sizeof(struct icmp_filter);
80028- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
80029+ if (copy_from_user(&filter, optval, optlen))
80030 return -EFAULT;
80031+ raw_sk(sk)->filter = filter;
80032+
80033 return 0;
80034 }
80035
80036 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
80037 {
80038 int len, ret = -EFAULT;
80039+ struct icmp_filter filter;
80040
80041 if (get_user(len, optlen))
80042 goto out;
80043@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
80044 if (len > sizeof(struct icmp_filter))
80045 len = sizeof(struct icmp_filter);
80046 ret = -EFAULT;
80047- if (put_user(len, optlen) ||
80048- copy_to_user(optval, &raw_sk(sk)->filter, len))
80049+ filter = raw_sk(sk)->filter;
80050+ if (put_user(len, optlen) || len > sizeof filter ||
80051+ copy_to_user(optval, &filter, len))
80052 goto out;
80053 ret = 0;
80054 out: return ret;
80055@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80056 sk_wmem_alloc_get(sp),
80057 sk_rmem_alloc_get(sp),
80058 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80059- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80060+ atomic_read(&sp->sk_refcnt),
80061+#ifdef CONFIG_GRKERNSEC_HIDESYM
80062+ NULL,
80063+#else
80064+ sp,
80065+#endif
80066+ atomic_read_unchecked(&sp->sk_drops));
80067 }
80068
80069 static int raw_seq_show(struct seq_file *seq, void *v)
80070diff --git a/net/ipv4/route.c b/net/ipv4/route.c
80071index 58f141b..b759702 100644
80072--- a/net/ipv4/route.c
80073+++ b/net/ipv4/route.c
80074@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
80075
80076 static inline int rt_genid(struct net *net)
80077 {
80078- return atomic_read(&net->ipv4.rt_genid);
80079+ return atomic_read_unchecked(&net->ipv4.rt_genid);
80080 }
80081
80082 #ifdef CONFIG_PROC_FS
80083@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
80084 unsigned char shuffle;
80085
80086 get_random_bytes(&shuffle, sizeof(shuffle));
80087- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
80088+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
80089 }
80090
80091 /*
80092@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
80093
80094 static __net_init int rt_secret_timer_init(struct net *net)
80095 {
80096- atomic_set(&net->ipv4.rt_genid,
80097+ atomic_set_unchecked(&net->ipv4.rt_genid,
80098 (int) ((num_physpages ^ (num_physpages>>8)) ^
80099 (jiffies ^ (jiffies >> 7))));
80100
80101diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
80102index f095659..adc892a 100644
80103--- a/net/ipv4/tcp.c
80104+++ b/net/ipv4/tcp.c
80105@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
80106 int val;
80107 int err = 0;
80108
80109+ pax_track_stack();
80110+
80111 /* This is a string value all the others are int's */
80112 if (optname == TCP_CONGESTION) {
80113 char name[TCP_CA_NAME_MAX];
80114@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
80115 struct tcp_sock *tp = tcp_sk(sk);
80116 int val, len;
80117
80118+ pax_track_stack();
80119+
80120 if (get_user(len, optlen))
80121 return -EFAULT;
80122
80123diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
80124index 6fc7961..33bad4a 100644
80125--- a/net/ipv4/tcp_ipv4.c
80126+++ b/net/ipv4/tcp_ipv4.c
80127@@ -85,6 +85,9 @@
80128 int sysctl_tcp_tw_reuse __read_mostly;
80129 int sysctl_tcp_low_latency __read_mostly;
80130
80131+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80132+extern int grsec_enable_blackhole;
80133+#endif
80134
80135 #ifdef CONFIG_TCP_MD5SIG
80136 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
80137@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
80138 return 0;
80139
80140 reset:
80141+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80142+ if (!grsec_enable_blackhole)
80143+#endif
80144 tcp_v4_send_reset(rsk, skb);
80145 discard:
80146 kfree_skb(skb);
80147@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
80148 TCP_SKB_CB(skb)->sacked = 0;
80149
80150 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80151- if (!sk)
80152+ if (!sk) {
80153+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80154+ ret = 1;
80155+#endif
80156 goto no_tcp_socket;
80157+ }
80158
80159 process:
80160- if (sk->sk_state == TCP_TIME_WAIT)
80161+ if (sk->sk_state == TCP_TIME_WAIT) {
80162+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80163+ ret = 2;
80164+#endif
80165 goto do_time_wait;
80166+ }
80167
80168 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
80169 goto discard_and_relse;
80170@@ -1651,6 +1665,10 @@ no_tcp_socket:
80171 bad_packet:
80172 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80173 } else {
80174+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80175+ if (!grsec_enable_blackhole || (ret == 1 &&
80176+ (skb->dev->flags & IFF_LOOPBACK)))
80177+#endif
80178 tcp_v4_send_reset(NULL, skb);
80179 }
80180
80181@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
80182 0, /* non standard timer */
80183 0, /* open_requests have no inode */
80184 atomic_read(&sk->sk_refcnt),
80185+#ifdef CONFIG_GRKERNSEC_HIDESYM
80186+ NULL,
80187+#else
80188 req,
80189+#endif
80190 len);
80191 }
80192
80193@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
80194 sock_i_uid(sk),
80195 icsk->icsk_probes_out,
80196 sock_i_ino(sk),
80197- atomic_read(&sk->sk_refcnt), sk,
80198+ atomic_read(&sk->sk_refcnt),
80199+#ifdef CONFIG_GRKERNSEC_HIDESYM
80200+ NULL,
80201+#else
80202+ sk,
80203+#endif
80204 jiffies_to_clock_t(icsk->icsk_rto),
80205 jiffies_to_clock_t(icsk->icsk_ack.ato),
80206 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
80207@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
80208 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
80209 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
80210 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
80211- atomic_read(&tw->tw_refcnt), tw, len);
80212+ atomic_read(&tw->tw_refcnt),
80213+#ifdef CONFIG_GRKERNSEC_HIDESYM
80214+ NULL,
80215+#else
80216+ tw,
80217+#endif
80218+ len);
80219 }
80220
80221 #define TMPSZ 150
80222diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
80223index 4c03598..e09a8e8 100644
80224--- a/net/ipv4/tcp_minisocks.c
80225+++ b/net/ipv4/tcp_minisocks.c
80226@@ -26,6 +26,10 @@
80227 #include <net/inet_common.h>
80228 #include <net/xfrm.h>
80229
80230+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80231+extern int grsec_enable_blackhole;
80232+#endif
80233+
80234 #ifdef CONFIG_SYSCTL
80235 #define SYNC_INIT 0 /* let the user enable it */
80236 #else
80237@@ -672,6 +676,10 @@ listen_overflow:
80238
80239 embryonic_reset:
80240 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
80241+
80242+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80243+ if (!grsec_enable_blackhole)
80244+#endif
80245 if (!(flg & TCP_FLAG_RST))
80246 req->rsk_ops->send_reset(sk, skb);
80247
80248diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
80249index af83bdf..ec91cb2 100644
80250--- a/net/ipv4/tcp_output.c
80251+++ b/net/ipv4/tcp_output.c
80252@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
80253 __u8 *md5_hash_location;
80254 int mss;
80255
80256+ pax_track_stack();
80257+
80258 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
80259 if (skb == NULL)
80260 return NULL;
80261diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
80262index 59f5b5e..193860f 100644
80263--- a/net/ipv4/tcp_probe.c
80264+++ b/net/ipv4/tcp_probe.c
80265@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
80266 if (cnt + width >= len)
80267 break;
80268
80269- if (copy_to_user(buf + cnt, tbuf, width))
80270+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
80271 return -EFAULT;
80272 cnt += width;
80273 }
80274diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
80275index 57d5501..a9ed13a 100644
80276--- a/net/ipv4/tcp_timer.c
80277+++ b/net/ipv4/tcp_timer.c
80278@@ -21,6 +21,10 @@
80279 #include <linux/module.h>
80280 #include <net/tcp.h>
80281
80282+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80283+extern int grsec_lastack_retries;
80284+#endif
80285+
80286 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
80287 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
80288 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
80289@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
80290 }
80291 }
80292
80293+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80294+ if ((sk->sk_state == TCP_LAST_ACK) &&
80295+ (grsec_lastack_retries > 0) &&
80296+ (grsec_lastack_retries < retry_until))
80297+ retry_until = grsec_lastack_retries;
80298+#endif
80299+
80300 if (retransmits_timed_out(sk, retry_until)) {
80301 /* Has it gone just too far? */
80302 tcp_write_err(sk);
80303diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
80304index 0ac8833..58d8c43 100644
80305--- a/net/ipv4/udp.c
80306+++ b/net/ipv4/udp.c
80307@@ -86,6 +86,7 @@
80308 #include <linux/types.h>
80309 #include <linux/fcntl.h>
80310 #include <linux/module.h>
80311+#include <linux/security.h>
80312 #include <linux/socket.h>
80313 #include <linux/sockios.h>
80314 #include <linux/igmp.h>
80315@@ -106,6 +107,10 @@
80316 #include <net/xfrm.h>
80317 #include "udp_impl.h"
80318
80319+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80320+extern int grsec_enable_blackhole;
80321+#endif
80322+
80323 struct udp_table udp_table;
80324 EXPORT_SYMBOL(udp_table);
80325
80326@@ -371,6 +376,9 @@ found:
80327 return s;
80328 }
80329
80330+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
80331+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
80332+
80333 /*
80334 * This routine is called by the ICMP module when it gets some
80335 * sort of error condition. If err < 0 then the socket should
80336@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
80337 dport = usin->sin_port;
80338 if (dport == 0)
80339 return -EINVAL;
80340+
80341+ err = gr_search_udp_sendmsg(sk, usin);
80342+ if (err)
80343+ return err;
80344 } else {
80345 if (sk->sk_state != TCP_ESTABLISHED)
80346 return -EDESTADDRREQ;
80347+
80348+ err = gr_search_udp_sendmsg(sk, NULL);
80349+ if (err)
80350+ return err;
80351+
80352 daddr = inet->daddr;
80353 dport = inet->dport;
80354 /* Open fast path for connected socket.
80355@@ -945,6 +962,10 @@ try_again:
80356 if (!skb)
80357 goto out;
80358
80359+ err = gr_search_udp_recvmsg(sk, skb);
80360+ if (err)
80361+ goto out_free;
80362+
80363 ulen = skb->len - sizeof(struct udphdr);
80364 copied = len;
80365 if (copied > ulen)
80366@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80367 if (rc == -ENOMEM) {
80368 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80369 is_udplite);
80370- atomic_inc(&sk->sk_drops);
80371+ atomic_inc_unchecked(&sk->sk_drops);
80372 }
80373 goto drop;
80374 }
80375@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80376 goto csum_error;
80377
80378 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80379+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80380+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80381+#endif
80382 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
80383
80384 /*
80385@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
80386 sk_wmem_alloc_get(sp),
80387 sk_rmem_alloc_get(sp),
80388 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80389- atomic_read(&sp->sk_refcnt), sp,
80390- atomic_read(&sp->sk_drops), len);
80391+ atomic_read(&sp->sk_refcnt),
80392+#ifdef CONFIG_GRKERNSEC_HIDESYM
80393+ NULL,
80394+#else
80395+ sp,
80396+#endif
80397+ atomic_read_unchecked(&sp->sk_drops), len);
80398 }
80399
80400 int udp4_seq_show(struct seq_file *seq, void *v)
80401diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
80402index 8ac3d09..fc58c5f 100644
80403--- a/net/ipv6/addrconf.c
80404+++ b/net/ipv6/addrconf.c
80405@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
80406 p.iph.ihl = 5;
80407 p.iph.protocol = IPPROTO_IPV6;
80408 p.iph.ttl = 64;
80409- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
80410+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
80411
80412 if (ops->ndo_do_ioctl) {
80413 mm_segment_t oldfs = get_fs();
80414diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
80415index cc4797d..7cfdfcc 100644
80416--- a/net/ipv6/inet6_connection_sock.c
80417+++ b/net/ipv6/inet6_connection_sock.c
80418@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
80419 #ifdef CONFIG_XFRM
80420 {
80421 struct rt6_info *rt = (struct rt6_info *)dst;
80422- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
80423+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
80424 }
80425 #endif
80426 }
80427@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
80428 #ifdef CONFIG_XFRM
80429 if (dst) {
80430 struct rt6_info *rt = (struct rt6_info *)dst;
80431- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
80432+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
80433 sk->sk_dst_cache = NULL;
80434 dst_release(dst);
80435 dst = NULL;
80436diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
80437index 093e9b2..f72cddb 100644
80438--- a/net/ipv6/inet6_hashtables.c
80439+++ b/net/ipv6/inet6_hashtables.c
80440@@ -119,7 +119,7 @@ out:
80441 }
80442 EXPORT_SYMBOL(__inet6_lookup_established);
80443
80444-static int inline compute_score(struct sock *sk, struct net *net,
80445+static inline int compute_score(struct sock *sk, struct net *net,
80446 const unsigned short hnum,
80447 const struct in6_addr *daddr,
80448 const int dif)
80449diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
80450index 4f7aaf6..f7acf45 100644
80451--- a/net/ipv6/ipv6_sockglue.c
80452+++ b/net/ipv6/ipv6_sockglue.c
80453@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
80454 int val, valbool;
80455 int retv = -ENOPROTOOPT;
80456
80457+ pax_track_stack();
80458+
80459 if (optval == NULL)
80460 val=0;
80461 else {
80462@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80463 int len;
80464 int val;
80465
80466+ pax_track_stack();
80467+
80468 if (ip6_mroute_opt(optname))
80469 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
80470
80471@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80472 if (sk->sk_type != SOCK_STREAM)
80473 return -ENOPROTOOPT;
80474
80475- msg.msg_control = optval;
80476+ msg.msg_control = (void __force_kernel *)optval;
80477 msg.msg_controllen = len;
80478 msg.msg_flags = 0;
80479
80480diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
80481index 1cf3f0c..1d4376f 100644
80482--- a/net/ipv6/netfilter/ip6_queue.c
80483+++ b/net/ipv6/netfilter/ip6_queue.c
80484@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
80485
80486 if (v->data_len < sizeof(*user_iph))
80487 return 0;
80488+ if (v->data_len > 65535)
80489+ return -EMSGSIZE;
80490+
80491 diff = v->data_len - e->skb->len;
80492 if (diff < 0) {
80493 if (pskb_trim(e->skb, v->data_len))
80494@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
80495 static inline void
80496 __ipq_rcv_skb(struct sk_buff *skb)
80497 {
80498- int status, type, pid, flags, nlmsglen, skblen;
80499+ int status, type, pid, flags;
80500+ unsigned int nlmsglen, skblen;
80501 struct nlmsghdr *nlh;
80502
80503 skblen = skb->len;
80504diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
80505index 78b5a36..7f37433 100644
80506--- a/net/ipv6/netfilter/ip6_tables.c
80507+++ b/net/ipv6/netfilter/ip6_tables.c
80508@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80509 private = &tmp;
80510 }
80511 #endif
80512+ memset(&info, 0, sizeof(info));
80513 info.valid_hooks = t->valid_hooks;
80514 memcpy(info.hook_entry, private->hook_entry,
80515 sizeof(info.hook_entry));
80516diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
80517index 4f24570..b813b34 100644
80518--- a/net/ipv6/raw.c
80519+++ b/net/ipv6/raw.c
80520@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
80521 {
80522 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
80523 skb_checksum_complete(skb)) {
80524- atomic_inc(&sk->sk_drops);
80525+ atomic_inc_unchecked(&sk->sk_drops);
80526 kfree_skb(skb);
80527 return NET_RX_DROP;
80528 }
80529
80530 /* Charge it to the socket. */
80531 if (sock_queue_rcv_skb(sk,skb)<0) {
80532- atomic_inc(&sk->sk_drops);
80533+ atomic_inc_unchecked(&sk->sk_drops);
80534 kfree_skb(skb);
80535 return NET_RX_DROP;
80536 }
80537@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80538 struct raw6_sock *rp = raw6_sk(sk);
80539
80540 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
80541- atomic_inc(&sk->sk_drops);
80542+ atomic_inc_unchecked(&sk->sk_drops);
80543 kfree_skb(skb);
80544 return NET_RX_DROP;
80545 }
80546@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80547
80548 if (inet->hdrincl) {
80549 if (skb_checksum_complete(skb)) {
80550- atomic_inc(&sk->sk_drops);
80551+ atomic_inc_unchecked(&sk->sk_drops);
80552 kfree_skb(skb);
80553 return NET_RX_DROP;
80554 }
80555@@ -518,7 +518,7 @@ csum_copy_err:
80556 as some normal condition.
80557 */
80558 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
80559- atomic_inc(&sk->sk_drops);
80560+ atomic_inc_unchecked(&sk->sk_drops);
80561 goto out;
80562 }
80563
80564@@ -600,7 +600,7 @@ out:
80565 return err;
80566 }
80567
80568-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
80569+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
80570 struct flowi *fl, struct rt6_info *rt,
80571 unsigned int flags)
80572 {
80573@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
80574 u16 proto;
80575 int err;
80576
80577+ pax_track_stack();
80578+
80579 /* Rough check on arithmetic overflow,
80580 better check is made in ip6_append_data().
80581 */
80582@@ -916,12 +918,17 @@ do_confirm:
80583 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
80584 char __user *optval, int optlen)
80585 {
80586+ struct icmp6_filter filter;
80587+
80588 switch (optname) {
80589 case ICMPV6_FILTER:
80590+ if (optlen < 0)
80591+ return -EINVAL;
80592 if (optlen > sizeof(struct icmp6_filter))
80593 optlen = sizeof(struct icmp6_filter);
80594- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
80595+ if (copy_from_user(&filter, optval, optlen))
80596 return -EFAULT;
80597+ raw6_sk(sk)->filter = filter;
80598 return 0;
80599 default:
80600 return -ENOPROTOOPT;
80601@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80602 char __user *optval, int __user *optlen)
80603 {
80604 int len;
80605+ struct icmp6_filter filter;
80606
80607 switch (optname) {
80608 case ICMPV6_FILTER:
80609@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80610 len = sizeof(struct icmp6_filter);
80611 if (put_user(len, optlen))
80612 return -EFAULT;
80613- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
80614+ filter = raw6_sk(sk)->filter;
80615+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
80616 return -EFAULT;
80617 return 0;
80618 default:
80619@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80620 0, 0L, 0,
80621 sock_i_uid(sp), 0,
80622 sock_i_ino(sp),
80623- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80624+ atomic_read(&sp->sk_refcnt),
80625+#ifdef CONFIG_GRKERNSEC_HIDESYM
80626+ NULL,
80627+#else
80628+ sp,
80629+#endif
80630+ atomic_read_unchecked(&sp->sk_drops));
80631 }
80632
80633 static int raw6_seq_show(struct seq_file *seq, void *v)
80634diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
80635index faae6df..d4430c1 100644
80636--- a/net/ipv6/tcp_ipv6.c
80637+++ b/net/ipv6/tcp_ipv6.c
80638@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
80639 }
80640 #endif
80641
80642+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80643+extern int grsec_enable_blackhole;
80644+#endif
80645+
80646 static void tcp_v6_hash(struct sock *sk)
80647 {
80648 if (sk->sk_state != TCP_CLOSE) {
80649@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
80650 return 0;
80651
80652 reset:
80653+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80654+ if (!grsec_enable_blackhole)
80655+#endif
80656 tcp_v6_send_reset(sk, skb);
80657 discard:
80658 if (opt_skb)
80659@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
80660 TCP_SKB_CB(skb)->sacked = 0;
80661
80662 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80663- if (!sk)
80664+ if (!sk) {
80665+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80666+ ret = 1;
80667+#endif
80668 goto no_tcp_socket;
80669+ }
80670
80671 process:
80672- if (sk->sk_state == TCP_TIME_WAIT)
80673+ if (sk->sk_state == TCP_TIME_WAIT) {
80674+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80675+ ret = 2;
80676+#endif
80677 goto do_time_wait;
80678+ }
80679
80680 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
80681 goto discard_and_relse;
80682@@ -1701,6 +1716,10 @@ no_tcp_socket:
80683 bad_packet:
80684 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80685 } else {
80686+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80687+ if (!grsec_enable_blackhole || (ret == 1 &&
80688+ (skb->dev->flags & IFF_LOOPBACK)))
80689+#endif
80690 tcp_v6_send_reset(NULL, skb);
80691 }
80692
80693@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
80694 uid,
80695 0, /* non standard timer */
80696 0, /* open_requests have no inode */
80697- 0, req);
80698+ 0,
80699+#ifdef CONFIG_GRKERNSEC_HIDESYM
80700+ NULL
80701+#else
80702+ req
80703+#endif
80704+ );
80705 }
80706
80707 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
80708@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
80709 sock_i_uid(sp),
80710 icsk->icsk_probes_out,
80711 sock_i_ino(sp),
80712- atomic_read(&sp->sk_refcnt), sp,
80713+ atomic_read(&sp->sk_refcnt),
80714+#ifdef CONFIG_GRKERNSEC_HIDESYM
80715+ NULL,
80716+#else
80717+ sp,
80718+#endif
80719 jiffies_to_clock_t(icsk->icsk_rto),
80720 jiffies_to_clock_t(icsk->icsk_ack.ato),
80721 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
80722@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
80723 dest->s6_addr32[2], dest->s6_addr32[3], destp,
80724 tw->tw_substate, 0, 0,
80725 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
80726- atomic_read(&tw->tw_refcnt), tw);
80727+ atomic_read(&tw->tw_refcnt),
80728+#ifdef CONFIG_GRKERNSEC_HIDESYM
80729+ NULL
80730+#else
80731+ tw
80732+#endif
80733+ );
80734 }
80735
80736 static int tcp6_seq_show(struct seq_file *seq, void *v)
80737diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
80738index 9cc6289..052c521 100644
80739--- a/net/ipv6/udp.c
80740+++ b/net/ipv6/udp.c
80741@@ -49,6 +49,10 @@
80742 #include <linux/seq_file.h>
80743 #include "udp_impl.h"
80744
80745+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80746+extern int grsec_enable_blackhole;
80747+#endif
80748+
80749 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
80750 {
80751 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
80752@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
80753 if (rc == -ENOMEM) {
80754 UDP6_INC_STATS_BH(sock_net(sk),
80755 UDP_MIB_RCVBUFERRORS, is_udplite);
80756- atomic_inc(&sk->sk_drops);
80757+ atomic_inc_unchecked(&sk->sk_drops);
80758 }
80759 goto drop;
80760 }
80761@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80762 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
80763 proto == IPPROTO_UDPLITE);
80764
80765+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80766+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80767+#endif
80768 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
80769
80770 kfree_skb(skb);
80771@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
80772 0, 0L, 0,
80773 sock_i_uid(sp), 0,
80774 sock_i_ino(sp),
80775- atomic_read(&sp->sk_refcnt), sp,
80776- atomic_read(&sp->sk_drops));
80777+ atomic_read(&sp->sk_refcnt),
80778+#ifdef CONFIG_GRKERNSEC_HIDESYM
80779+ NULL,
80780+#else
80781+ sp,
80782+#endif
80783+ atomic_read_unchecked(&sp->sk_drops));
80784 }
80785
80786 int udp6_seq_show(struct seq_file *seq, void *v)
80787diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
80788index 811984d..11f59b7 100644
80789--- a/net/irda/ircomm/ircomm_tty.c
80790+++ b/net/irda/ircomm/ircomm_tty.c
80791@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80792 add_wait_queue(&self->open_wait, &wait);
80793
80794 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
80795- __FILE__,__LINE__, tty->driver->name, self->open_count );
80796+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
80797
80798 /* As far as I can see, we protect open_count - Jean II */
80799 spin_lock_irqsave(&self->spinlock, flags);
80800 if (!tty_hung_up_p(filp)) {
80801 extra_count = 1;
80802- self->open_count--;
80803+ local_dec(&self->open_count);
80804 }
80805 spin_unlock_irqrestore(&self->spinlock, flags);
80806- self->blocked_open++;
80807+ local_inc(&self->blocked_open);
80808
80809 while (1) {
80810 if (tty->termios->c_cflag & CBAUD) {
80811@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80812 }
80813
80814 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
80815- __FILE__,__LINE__, tty->driver->name, self->open_count );
80816+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
80817
80818 schedule();
80819 }
80820@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80821 if (extra_count) {
80822 /* ++ is not atomic, so this should be protected - Jean II */
80823 spin_lock_irqsave(&self->spinlock, flags);
80824- self->open_count++;
80825+ local_inc(&self->open_count);
80826 spin_unlock_irqrestore(&self->spinlock, flags);
80827 }
80828- self->blocked_open--;
80829+ local_dec(&self->blocked_open);
80830
80831 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
80832- __FILE__,__LINE__, tty->driver->name, self->open_count);
80833+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
80834
80835 if (!retval)
80836 self->flags |= ASYNC_NORMAL_ACTIVE;
80837@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
80838 }
80839 /* ++ is not atomic, so this should be protected - Jean II */
80840 spin_lock_irqsave(&self->spinlock, flags);
80841- self->open_count++;
80842+ local_inc(&self->open_count);
80843
80844 tty->driver_data = self;
80845 self->tty = tty;
80846 spin_unlock_irqrestore(&self->spinlock, flags);
80847
80848 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
80849- self->line, self->open_count);
80850+ self->line, local_read(&self->open_count));
80851
80852 /* Not really used by us, but lets do it anyway */
80853 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
80854@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80855 return;
80856 }
80857
80858- if ((tty->count == 1) && (self->open_count != 1)) {
80859+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
80860 /*
80861 * Uh, oh. tty->count is 1, which means that the tty
80862 * structure will be freed. state->count should always
80863@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80864 */
80865 IRDA_DEBUG(0, "%s(), bad serial port count; "
80866 "tty->count is 1, state->count is %d\n", __func__ ,
80867- self->open_count);
80868- self->open_count = 1;
80869+ local_read(&self->open_count));
80870+ local_set(&self->open_count, 1);
80871 }
80872
80873- if (--self->open_count < 0) {
80874+ if (local_dec_return(&self->open_count) < 0) {
80875 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
80876- __func__, self->line, self->open_count);
80877- self->open_count = 0;
80878+ __func__, self->line, local_read(&self->open_count));
80879+ local_set(&self->open_count, 0);
80880 }
80881- if (self->open_count) {
80882+ if (local_read(&self->open_count)) {
80883 spin_unlock_irqrestore(&self->spinlock, flags);
80884
80885 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
80886@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80887 tty->closing = 0;
80888 self->tty = NULL;
80889
80890- if (self->blocked_open) {
80891+ if (local_read(&self->blocked_open)) {
80892 if (self->close_delay)
80893 schedule_timeout_interruptible(self->close_delay);
80894 wake_up_interruptible(&self->open_wait);
80895@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
80896 spin_lock_irqsave(&self->spinlock, flags);
80897 self->flags &= ~ASYNC_NORMAL_ACTIVE;
80898 self->tty = NULL;
80899- self->open_count = 0;
80900+ local_set(&self->open_count, 0);
80901 spin_unlock_irqrestore(&self->spinlock, flags);
80902
80903 wake_up_interruptible(&self->open_wait);
80904@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
80905 seq_putc(m, '\n');
80906
80907 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
80908- seq_printf(m, "Open count: %d\n", self->open_count);
80909+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
80910 seq_printf(m, "Max data size: %d\n", self->max_data_size);
80911 seq_printf(m, "Max header size: %d\n", self->max_header_size);
80912
80913diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
80914index bada1b9..f325943 100644
80915--- a/net/iucv/af_iucv.c
80916+++ b/net/iucv/af_iucv.c
80917@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
80918
80919 write_lock_bh(&iucv_sk_list.lock);
80920
80921- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
80922+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80923 while (__iucv_get_sock_by_name(name)) {
80924 sprintf(name, "%08x",
80925- atomic_inc_return(&iucv_sk_list.autobind_name));
80926+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80927 }
80928
80929 write_unlock_bh(&iucv_sk_list.lock);
80930diff --git a/net/key/af_key.c b/net/key/af_key.c
80931index 4e98193..439b449 100644
80932--- a/net/key/af_key.c
80933+++ b/net/key/af_key.c
80934@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
80935 struct xfrm_migrate m[XFRM_MAX_DEPTH];
80936 struct xfrm_kmaddress k;
80937
80938+ pax_track_stack();
80939+
80940 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
80941 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
80942 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
80943@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
80944 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
80945 else
80946 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
80947+#ifdef CONFIG_GRKERNSEC_HIDESYM
80948+ NULL,
80949+#else
80950 s,
80951+#endif
80952 atomic_read(&s->sk_refcnt),
80953 sk_rmem_alloc_get(s),
80954 sk_wmem_alloc_get(s),
80955diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
80956index bda96d1..c038b72 100644
80957--- a/net/lapb/lapb_iface.c
80958+++ b/net/lapb/lapb_iface.c
80959@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
80960 goto out;
80961
80962 lapb->dev = dev;
80963- lapb->callbacks = *callbacks;
80964+ lapb->callbacks = callbacks;
80965
80966 __lapb_insert_cb(lapb);
80967
80968@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
80969
80970 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
80971 {
80972- if (lapb->callbacks.connect_confirmation)
80973- lapb->callbacks.connect_confirmation(lapb->dev, reason);
80974+ if (lapb->callbacks->connect_confirmation)
80975+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
80976 }
80977
80978 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
80979 {
80980- if (lapb->callbacks.connect_indication)
80981- lapb->callbacks.connect_indication(lapb->dev, reason);
80982+ if (lapb->callbacks->connect_indication)
80983+ lapb->callbacks->connect_indication(lapb->dev, reason);
80984 }
80985
80986 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
80987 {
80988- if (lapb->callbacks.disconnect_confirmation)
80989- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
80990+ if (lapb->callbacks->disconnect_confirmation)
80991+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
80992 }
80993
80994 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
80995 {
80996- if (lapb->callbacks.disconnect_indication)
80997- lapb->callbacks.disconnect_indication(lapb->dev, reason);
80998+ if (lapb->callbacks->disconnect_indication)
80999+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
81000 }
81001
81002 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
81003 {
81004- if (lapb->callbacks.data_indication)
81005- return lapb->callbacks.data_indication(lapb->dev, skb);
81006+ if (lapb->callbacks->data_indication)
81007+ return lapb->callbacks->data_indication(lapb->dev, skb);
81008
81009 kfree_skb(skb);
81010 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
81011@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
81012 {
81013 int used = 0;
81014
81015- if (lapb->callbacks.data_transmit) {
81016- lapb->callbacks.data_transmit(lapb->dev, skb);
81017+ if (lapb->callbacks->data_transmit) {
81018+ lapb->callbacks->data_transmit(lapb->dev, skb);
81019 used = 1;
81020 }
81021
81022diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
81023index fe2d3f8..e57f683 100644
81024--- a/net/mac80211/cfg.c
81025+++ b/net/mac80211/cfg.c
81026@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
81027 return err;
81028 }
81029
81030-struct cfg80211_ops mac80211_config_ops = {
81031+const struct cfg80211_ops mac80211_config_ops = {
81032 .add_virtual_intf = ieee80211_add_iface,
81033 .del_virtual_intf = ieee80211_del_iface,
81034 .change_virtual_intf = ieee80211_change_iface,
81035diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
81036index 7d7879f..2d51f62 100644
81037--- a/net/mac80211/cfg.h
81038+++ b/net/mac80211/cfg.h
81039@@ -4,6 +4,6 @@
81040 #ifndef __CFG_H
81041 #define __CFG_H
81042
81043-extern struct cfg80211_ops mac80211_config_ops;
81044+extern const struct cfg80211_ops mac80211_config_ops;
81045
81046 #endif /* __CFG_H */
81047diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
81048index 99c7525..9cb4937 100644
81049--- a/net/mac80211/debugfs_key.c
81050+++ b/net/mac80211/debugfs_key.c
81051@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
81052 size_t count, loff_t *ppos)
81053 {
81054 struct ieee80211_key *key = file->private_data;
81055- int i, res, bufsize = 2 * key->conf.keylen + 2;
81056+ int i, bufsize = 2 * key->conf.keylen + 2;
81057 char *buf = kmalloc(bufsize, GFP_KERNEL);
81058 char *p = buf;
81059+ ssize_t res;
81060+
81061+ if (buf == NULL)
81062+ return -ENOMEM;
81063
81064 for (i = 0; i < key->conf.keylen; i++)
81065 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
81066diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
81067index 33a2e89..08650c8 100644
81068--- a/net/mac80211/debugfs_sta.c
81069+++ b/net/mac80211/debugfs_sta.c
81070@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
81071 int i;
81072 struct sta_info *sta = file->private_data;
81073
81074+ pax_track_stack();
81075+
81076 spin_lock_bh(&sta->lock);
81077 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
81078 sta->ampdu_mlme.dialog_token_allocator + 1);
81079diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
81080index ca62bfe..6657a03 100644
81081--- a/net/mac80211/ieee80211_i.h
81082+++ b/net/mac80211/ieee80211_i.h
81083@@ -25,6 +25,7 @@
81084 #include <linux/etherdevice.h>
81085 #include <net/cfg80211.h>
81086 #include <net/mac80211.h>
81087+#include <asm/local.h>
81088 #include "key.h"
81089 #include "sta_info.h"
81090
81091@@ -635,7 +636,7 @@ struct ieee80211_local {
81092 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
81093 spinlock_t queue_stop_reason_lock;
81094
81095- int open_count;
81096+ local_t open_count;
81097 int monitors, cooked_mntrs;
81098 /* number of interfaces with corresponding FIF_ flags */
81099 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
81100diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
81101index 079c500..eb3c6d4 100644
81102--- a/net/mac80211/iface.c
81103+++ b/net/mac80211/iface.c
81104@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
81105 break;
81106 }
81107
81108- if (local->open_count == 0) {
81109+ if (local_read(&local->open_count) == 0) {
81110 res = drv_start(local);
81111 if (res)
81112 goto err_del_bss;
81113@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
81114 * Validate the MAC address for this device.
81115 */
81116 if (!is_valid_ether_addr(dev->dev_addr)) {
81117- if (!local->open_count)
81118+ if (!local_read(&local->open_count))
81119 drv_stop(local);
81120 return -EADDRNOTAVAIL;
81121 }
81122@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
81123
81124 hw_reconf_flags |= __ieee80211_recalc_idle(local);
81125
81126- local->open_count++;
81127+ local_inc(&local->open_count);
81128 if (hw_reconf_flags) {
81129 ieee80211_hw_config(local, hw_reconf_flags);
81130 /*
81131@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
81132 err_del_interface:
81133 drv_remove_interface(local, &conf);
81134 err_stop:
81135- if (!local->open_count)
81136+ if (!local_read(&local->open_count))
81137 drv_stop(local);
81138 err_del_bss:
81139 sdata->bss = NULL;
81140@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
81141 WARN_ON(!list_empty(&sdata->u.ap.vlans));
81142 }
81143
81144- local->open_count--;
81145+ local_dec(&local->open_count);
81146
81147 switch (sdata->vif.type) {
81148 case NL80211_IFTYPE_AP_VLAN:
81149@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
81150
81151 ieee80211_recalc_ps(local, -1);
81152
81153- if (local->open_count == 0) {
81154+ if (local_read(&local->open_count) == 0) {
81155 ieee80211_clear_tx_pending(local);
81156 ieee80211_stop_device(local);
81157
81158diff --git a/net/mac80211/main.c b/net/mac80211/main.c
81159index 2dfe176..74e4388 100644
81160--- a/net/mac80211/main.c
81161+++ b/net/mac80211/main.c
81162@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
81163 local->hw.conf.power_level = power;
81164 }
81165
81166- if (changed && local->open_count) {
81167+ if (changed && local_read(&local->open_count)) {
81168 ret = drv_config(local, changed);
81169 /*
81170 * Goal:
81171diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
81172index e67eea7..fcc227e 100644
81173--- a/net/mac80211/mlme.c
81174+++ b/net/mac80211/mlme.c
81175@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
81176 bool have_higher_than_11mbit = false, newsta = false;
81177 u16 ap_ht_cap_flags;
81178
81179+ pax_track_stack();
81180+
81181 /*
81182 * AssocResp and ReassocResp have identical structure, so process both
81183 * of them in this function.
81184diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
81185index e535f1c..4d733d1 100644
81186--- a/net/mac80211/pm.c
81187+++ b/net/mac80211/pm.c
81188@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
81189 }
81190
81191 /* stop hardware - this must stop RX */
81192- if (local->open_count)
81193+ if (local_read(&local->open_count))
81194 ieee80211_stop_device(local);
81195
81196 local->suspended = true;
81197diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
81198index b33efc4..0a2efb6 100644
81199--- a/net/mac80211/rate.c
81200+++ b/net/mac80211/rate.c
81201@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
81202 struct rate_control_ref *ref, *old;
81203
81204 ASSERT_RTNL();
81205- if (local->open_count)
81206+ if (local_read(&local->open_count))
81207 return -EBUSY;
81208
81209 ref = rate_control_alloc(name, local);
81210diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
81211index b1d7904..57e4da7 100644
81212--- a/net/mac80211/tx.c
81213+++ b/net/mac80211/tx.c
81214@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
81215 return cpu_to_le16(dur);
81216 }
81217
81218-static int inline is_ieee80211_device(struct ieee80211_local *local,
81219+static inline int is_ieee80211_device(struct ieee80211_local *local,
81220 struct net_device *dev)
81221 {
81222 return local == wdev_priv(dev->ieee80211_ptr);
81223diff --git a/net/mac80211/util.c b/net/mac80211/util.c
81224index 31b1085..48fb26d 100644
81225--- a/net/mac80211/util.c
81226+++ b/net/mac80211/util.c
81227@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
81228 local->resuming = true;
81229
81230 /* restart hardware */
81231- if (local->open_count) {
81232+ if (local_read(&local->open_count)) {
81233 /*
81234 * Upon resume hardware can sometimes be goofy due to
81235 * various platform / driver / bus issues, so restarting
81236diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
81237index 634d14a..b35a608 100644
81238--- a/net/netfilter/Kconfig
81239+++ b/net/netfilter/Kconfig
81240@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
81241
81242 To compile it as a module, choose M here. If unsure, say N.
81243
81244+config NETFILTER_XT_MATCH_GRADM
81245+ tristate '"gradm" match support'
81246+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
81247+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
81248+ ---help---
81249+ The gradm match allows to match on grsecurity RBAC being enabled.
81250+ It is useful when iptables rules are applied early on bootup to
81251+ prevent connections to the machine (except from a trusted host)
81252+ while the RBAC system is disabled.
81253+
81254 config NETFILTER_XT_MATCH_HASHLIMIT
81255 tristate '"hashlimit" match support'
81256 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
81257diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
81258index 49f62ee..a17b2c6 100644
81259--- a/net/netfilter/Makefile
81260+++ b/net/netfilter/Makefile
81261@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
81262 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
81263 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
81264 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
81265+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
81266 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
81267 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
81268 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
81269diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
81270index 3c7e427..724043c 100644
81271--- a/net/netfilter/ipvs/ip_vs_app.c
81272+++ b/net/netfilter/ipvs/ip_vs_app.c
81273@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
81274 .open = ip_vs_app_open,
81275 .read = seq_read,
81276 .llseek = seq_lseek,
81277- .release = seq_release,
81278+ .release = seq_release_net,
81279 };
81280 #endif
81281
81282diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
81283index 95682e5..457dbac 100644
81284--- a/net/netfilter/ipvs/ip_vs_conn.c
81285+++ b/net/netfilter/ipvs/ip_vs_conn.c
81286@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
81287 /* if the connection is not template and is created
81288 * by sync, preserve the activity flag.
81289 */
81290- cp->flags |= atomic_read(&dest->conn_flags) &
81291+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
81292 (~IP_VS_CONN_F_INACTIVE);
81293 else
81294- cp->flags |= atomic_read(&dest->conn_flags);
81295+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
81296 cp->dest = dest;
81297
81298 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
81299@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
81300 atomic_set(&cp->refcnt, 1);
81301
81302 atomic_set(&cp->n_control, 0);
81303- atomic_set(&cp->in_pkts, 0);
81304+ atomic_set_unchecked(&cp->in_pkts, 0);
81305
81306 atomic_inc(&ip_vs_conn_count);
81307 if (flags & IP_VS_CONN_F_NO_CPORT)
81308@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
81309 .open = ip_vs_conn_open,
81310 .read = seq_read,
81311 .llseek = seq_lseek,
81312- .release = seq_release,
81313+ .release = seq_release_net,
81314 };
81315
81316 static const char *ip_vs_origin_name(unsigned flags)
81317@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
81318 .open = ip_vs_conn_sync_open,
81319 .read = seq_read,
81320 .llseek = seq_lseek,
81321- .release = seq_release,
81322+ .release = seq_release_net,
81323 };
81324
81325 #endif
81326@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
81327
81328 /* Don't drop the entry if its number of incoming packets is not
81329 located in [0, 8] */
81330- i = atomic_read(&cp->in_pkts);
81331+ i = atomic_read_unchecked(&cp->in_pkts);
81332 if (i > 8 || i < 0) return 0;
81333
81334 if (!todrop_rate[i]) return 0;
81335diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
81336index b95699f..5fee919 100644
81337--- a/net/netfilter/ipvs/ip_vs_core.c
81338+++ b/net/netfilter/ipvs/ip_vs_core.c
81339@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
81340 ret = cp->packet_xmit(skb, cp, pp);
81341 /* do not touch skb anymore */
81342
81343- atomic_inc(&cp->in_pkts);
81344+ atomic_inc_unchecked(&cp->in_pkts);
81345 ip_vs_conn_put(cp);
81346 return ret;
81347 }
81348@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
81349 * Sync connection if it is about to close to
81350 * encorage the standby servers to update the connections timeout
81351 */
81352- pkts = atomic_add_return(1, &cp->in_pkts);
81353+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
81354 if (af == AF_INET &&
81355 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
81356 (((cp->protocol != IPPROTO_TCP ||
81357diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
81358index 02b2610..2d89424 100644
81359--- a/net/netfilter/ipvs/ip_vs_ctl.c
81360+++ b/net/netfilter/ipvs/ip_vs_ctl.c
81361@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
81362 ip_vs_rs_hash(dest);
81363 write_unlock_bh(&__ip_vs_rs_lock);
81364 }
81365- atomic_set(&dest->conn_flags, conn_flags);
81366+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
81367
81368 /* bind the service */
81369 if (!dest->svc) {
81370@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81371 " %-7s %-6d %-10d %-10d\n",
81372 &dest->addr.in6,
81373 ntohs(dest->port),
81374- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81375+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81376 atomic_read(&dest->weight),
81377 atomic_read(&dest->activeconns),
81378 atomic_read(&dest->inactconns));
81379@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81380 "%-7s %-6d %-10d %-10d\n",
81381 ntohl(dest->addr.ip),
81382 ntohs(dest->port),
81383- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81384+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81385 atomic_read(&dest->weight),
81386 atomic_read(&dest->activeconns),
81387 atomic_read(&dest->inactconns));
81388@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
81389 .open = ip_vs_info_open,
81390 .read = seq_read,
81391 .llseek = seq_lseek,
81392- .release = seq_release_private,
81393+ .release = seq_release_net,
81394 };
81395
81396 #endif
81397@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
81398 .open = ip_vs_stats_seq_open,
81399 .read = seq_read,
81400 .llseek = seq_lseek,
81401- .release = single_release,
81402+ .release = single_release_net,
81403 };
81404
81405 #endif
81406@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
81407
81408 entry.addr = dest->addr.ip;
81409 entry.port = dest->port;
81410- entry.conn_flags = atomic_read(&dest->conn_flags);
81411+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
81412 entry.weight = atomic_read(&dest->weight);
81413 entry.u_threshold = dest->u_threshold;
81414 entry.l_threshold = dest->l_threshold;
81415@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81416 unsigned char arg[128];
81417 int ret = 0;
81418
81419+ pax_track_stack();
81420+
81421 if (!capable(CAP_NET_ADMIN))
81422 return -EPERM;
81423
81424@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
81425 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
81426
81427 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
81428- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81429+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81430 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
81431 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
81432 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
81433diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
81434index e177f0d..55e8581 100644
81435--- a/net/netfilter/ipvs/ip_vs_sync.c
81436+++ b/net/netfilter/ipvs/ip_vs_sync.c
81437@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
81438
81439 if (opt)
81440 memcpy(&cp->in_seq, opt, sizeof(*opt));
81441- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81442+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81443 cp->state = state;
81444 cp->old_state = cp->state;
81445 /*
81446diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
81447index 30b3189..e2e4b55 100644
81448--- a/net/netfilter/ipvs/ip_vs_xmit.c
81449+++ b/net/netfilter/ipvs/ip_vs_xmit.c
81450@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
81451 else
81452 rc = NF_ACCEPT;
81453 /* do not touch skb anymore */
81454- atomic_inc(&cp->in_pkts);
81455+ atomic_inc_unchecked(&cp->in_pkts);
81456 goto out;
81457 }
81458
81459@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
81460 else
81461 rc = NF_ACCEPT;
81462 /* do not touch skb anymore */
81463- atomic_inc(&cp->in_pkts);
81464+ atomic_inc_unchecked(&cp->in_pkts);
81465 goto out;
81466 }
81467
81468diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
81469index d521718..d0fd7a1 100644
81470--- a/net/netfilter/nf_conntrack_netlink.c
81471+++ b/net/netfilter/nf_conntrack_netlink.c
81472@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
81473 static int
81474 ctnetlink_parse_tuple(const struct nlattr * const cda[],
81475 struct nf_conntrack_tuple *tuple,
81476- enum ctattr_tuple type, u_int8_t l3num)
81477+ enum ctattr_type type, u_int8_t l3num)
81478 {
81479 struct nlattr *tb[CTA_TUPLE_MAX+1];
81480 int err;
81481diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
81482index f900dc3..5e45346 100644
81483--- a/net/netfilter/nfnetlink_log.c
81484+++ b/net/netfilter/nfnetlink_log.c
81485@@ -68,7 +68,7 @@ struct nfulnl_instance {
81486 };
81487
81488 static DEFINE_RWLOCK(instances_lock);
81489-static atomic_t global_seq;
81490+static atomic_unchecked_t global_seq;
81491
81492 #define INSTANCE_BUCKETS 16
81493 static struct hlist_head instance_table[INSTANCE_BUCKETS];
81494@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
81495 /* global sequence number */
81496 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
81497 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
81498- htonl(atomic_inc_return(&global_seq)));
81499+ htonl(atomic_inc_return_unchecked(&global_seq)));
81500
81501 if (data_len) {
81502 struct nlattr *nla;
81503diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
81504new file mode 100644
81505index 0000000..b1bac76
81506--- /dev/null
81507+++ b/net/netfilter/xt_gradm.c
81508@@ -0,0 +1,51 @@
81509+/*
81510+ * gradm match for netfilter
81511